prompt (string, 162 to 4.26M chars) | response (string, 109 to 5.16M chars)
---|---
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
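// Illustrative sketch (not part of the original file): for a configuration with 32 buffer
// entries, 16 linked lists (AXI IDs) and up to 8 beats per entry, the derived field widths are:
//
//   val p = ReservableListBufferParameters(numEntries = 32, numLists = 16, numBeats = 8)
//   p.entryBits == 5   // log2Ceil(32)
//   p.listBits  == 4   // log2Ceil(16)
//   p.beatBits  == 3   // log2Ceil(8)
//
// The numEntries == 1 (or numLists/numBeats == 1) special case still yields a 1-bit field, which
// keeps the UInt(entryBits.W) declarations below from becoming zero-width wires.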
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
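// Illustrative sketch (not part of the original file), assuming numTlTxns = 32 and the first AXI
// master: the TileLink source space spans 2 * numTlTxns = 64 IDs. Reads use Cat(0, idx), i.e.
// sources 0..31, and writes use Cat(1, idx), i.e. sources 32..63, so the MSB of a D-channel
// 'source' alone routes a response to either the read-reorder path or the B-channel path.
//
//   val exampleRId = Cat(isReadSourceBit,  5.U(5.W))   // source  5 -> read  transaction 5
//   val exampleWId = Cat(isWriteSourceBit, 5.U(5.W))   // source 37 -> write transaction 5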
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
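// Illustrative worked example (not part of the original file) of the priority-select expression
// above, for an 8-bit usedWriteIds value:
//
//   usedWriteIds               = b01101011   // bits 0 and 1 busy; bit 2 is the lowest free slot
//   ~usedWriteIds              = b10010100
//   leftOR(~usedWriteIds)      = b11111100   // every bit at or above the lowest set bit
//   << 1, then inverted        = b00000111
//   & ~usedWriteIds            = b00000100   // one-hot select of free write ID 2
//
// So freeWriteIdOHRaw always points at the lowest-index TileLink write source that is not in use.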
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
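// Illustrative wiring sketch (not part of the original file). The node and variable names here
// (axi4MasterNode, tlXbar) and the exact upstream adapters are assumptions; a typical diplomatic
// use hangs the adapter between an AXI4 master port and a TileLink crossbar, with an
// AXI4Fragmenter upstream when the raw master cannot already satisfy the alignment assertions
// in checkRequest():
//
//   // inside some LazyModule, with an implicit Parameters in scope
//   tlXbar := UnsafeAXI4ToTL(numTlTxns = 4, wcorrupt = true) := AXI4Fragmenter() := axi4MasterNode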
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
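// Illustrative driving sketch (not part of the original file) for the IO defined above. The
// signal names (arValid, arId, dValid, dSource, dBeat) are placeholders. A parent first reserves
// a slot for an AXI read ID, uses the returned 'ioReservedIndex' as the TileLink source, later
// feeds D-channel beats back tagged with that index, and drains 'ioDataOut' in per-ID order:
//
//   val buf = Module(new ReservableListBuffer(
//     new ReorderData(64, 2, Nil), ReservableListBufferParameters(32, 16, 8)))
//   buf.ioReserve.valid       := arValid
//   buf.ioReserve.bits        := arId      // which linked list (AXI ID) to append to
//   val tlSource               = buf.ioReservedIndex
//   buf.ioResponse.valid      := dValid
//   buf.ioResponse.bits.index := dSource   // entry reserved earlier for this response
//   buf.ioResponse.bits.data  := dBeat     // (count/numBeats1 assignments omitted for brevity)
//   buf.ioDataOut.ready       := true.B    // beats come out in order per 'listIndex'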
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
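// Illustrative timeline (not part of the original file) for two single-beat reads on one AXI ID
// whose TileLink responses return in reverse order (entries e0 = head, e1 = tail):
//   cycle 0: e1's response arrives out of order           -> stashed in the 'data' SRAM
//   cycle 1: e0's response arrives in order and goes straight to 'ioDataOut'; since e1's data is
//            already present, 'startUnwind' fires and the SRAM read of e1 is issued
//   cycle 2: e1's data appears on 'ioDataOut' from the SRAM; the list is now empty, so
//            'stopUnwind' fires when the output channel consumes it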
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, since we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_325( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
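// Illustrative usage sketch (not part of the original file); 'io.dataIn' is a placeholder. This
// builds a 2-deep pipeline that resets to false, with flops named data_pipe_0 and data_pipe_1:
//
//   val piped = ShiftRegInit(in = io.dataIn, n = 2, init = false.B, name = Some("data_pipe"))
//
// Because of the foldRight, stage n-1 (data_pipe_1) samples 'in' and stage 0 (data_pipe_0)
// drives the returned value.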
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
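// Illustrative usage sketch (not part of the original file); 'io.asyncLevel' is a placeholder.
// A conventional 3-deep CDC synchronizer for a level signal entering this clock domain:
//
//   val levelSync = AsyncResetSynchronizerShiftReg(io.asyncLevel, sync = 3, init = 0,
//                                                  name = Some("level_sync"))
//
// Each bit of the chain elaborates as an AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 instance,
// which is the flavour of module shown in the generated Verilog further below.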
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
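// Illustrative usage sketch (not part of the original file); 'io.payload' and 'io.captureEn' are
// placeholders. A single-stage, enable-gated crossing register that resets to zero:
//
//   val held = ClockCrossingReg(io.payload, en = io.captureEn, doInit = true,
//                               name = Some("cdc_payload_reg"))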
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_309( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
end // always @(posedge, posedge)
assign io_q = io_q_0;
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
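// Illustrative sketch (not part of the original file); 'enableMonitors' and 'myTLNode' are
// placeholders, and an implicit Parameters must be in scope. With enable = true an ephemeral
// monitor node is spliced onto the edge; with enable = false the node is returned untouched:
//
//   val monitoredNode = TLMonitor(enable = enableMonitors, node = myTLNode)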
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 // add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
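// Hedged worked example (illustrative, not used by the monitor): legalizeADSource above
// packs per-source bookkeeping into one wide UInt, giving each source ID a slot of
// (1 << logBusSize) bits that holds (value << 1) | 1, so an all-zero slot unambiguously
// means "nothing inflight". The helper below merely restates that encoding; its name is
// an assumption, not part of the original code.
def exampleSlotEncoding(value: UInt, source: UInt, logBusSize: Int): UInt =
  ((value << 1) | 1.U) << (source << logBusSize.U)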
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 // add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
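// Hedged usage sketch (illustrative names, not part of the original file): DecoupledHelper
// gathers the ready/valid conditions of a transfer so that each handshake output can be
// driven from all conditions *except* itself, avoiding combinational ready/valid loops.
object DecoupledHelperExample {
  def passthrough(in: DecoupledIO[UInt], out: DecoupledIO[UInt]): Unit = {
    val helper = DecoupledHelper(in.valid, out.ready)
    in.ready := helper.fire(in.valid) // every condition except in.valid
    out.valid := helper.fire(out.ready) // every condition except out.ready
    out.bits := in.bits
  }
}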
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
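// Hedged usage sketch: MuxTLookup compares a UInt key against each mapping entry and
// returns the matching tuple, or the default when nothing matches. Values are illustrative.
object MuxTLookupExample {
  def decode(op: UInt): (Bool, UInt) =
    MuxTLookup(op, (false.B, 0.U(4.W)), Seq(
      1.U -> (true.B, 3.U(4.W)),
      2.U -> (true.B, 7.U(4.W))))
}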
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
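// Hedged usage sketch: Str packs literal text (or a formatted number) into a UInt of ASCII
// bytes, which is convenient for emitting fixed-width tags from a debug printf. The object
// and method names below are illustrative.
object StrExample {
  def statusTag(ok: Bool): UInt = Mux(ok, Str("PASS"), Str("FAIL"))
}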
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
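// Hedged usage sketch: Majority is a simple n/2+1 vote, e.g. agreeing when at least two of
// three redundant flags are set. Signal names are illustrative.
object MajorityExample {
  def twoOfThree(a: Bool, b: Bool, c: Bool): Bool = Majority(Seq(a, b, c))
}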
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
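// Hedged usage sketch: MaskGen turns the low address bits plus log2(transfer size) into a
// beatBytes-wide byte-lane mask, per the examples in the comment above. Names and the
// beatBytes value below are illustrative.
object MaskGenExample {
  def writeMask(addr: UInt, lgSize: UInt): UInt =
    MaskGen(addr(1, 0), lgSize, beatBytes = 4)
}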
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block, and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
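// Hedged usage sketch: inside a Module, a PlusArg is read at simulation time from the
// command line, e.g. +example_threshold=100. The plusarg name below is illustrative and is
// not one that this file defines.
object PlusArgExample {
  def threshold(): UInt =
    PlusArg("example_threshold", default = 0, docstring = "Illustrative threshold. Off if 0.")
}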
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
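// Hedged usage sketch: leftOR smears every set bit toward the MSB and rightOR toward the
// LSB; for a 5-bit input, leftOR(b00100) = b11100 and rightOR(b00100) = b00111. A common
// use is turning a one-hot grant into a thermometer mask (the name below is illustrative).
def thermometerFromOneHot(grantOH: UInt): UInt = leftOR(grantOH)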
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
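// Hedged usage sketch: unlike Seq.groupBy, key order follows first appearance, so code
// generated from the result is deterministic run to run. The values below are illustrative.
def groupByIntoSeqExample: Seq[(Int, Seq[String])] =
  groupByIntoSeq(Seq("a", "bb", "cc", "d"))(_.length) // Seq(1 -> Seq("a", "d"), 2 -> Seq("bb", "cc"))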
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
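// Hedged usage sketch: adResponse (and bcResponse) give the response opcode expected for a
// given request opcode; the TLMonitor earlier in this dump builds the equivalent table
// inline as responseMap. The object and method names below are illustrative.
object TLMessagesExample {
  def expectedDOpcode(aOpcode: UInt): UInt = TLMessages.adResponse(aOpcode)
}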
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on the inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the global point of serialization and holds read-only permissions.
* (N)one: the agent holds no permissions.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
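// Hedged usage sketch: a Grant's cap param tells the client the permission level it now
// holds; toT corresponds to exclusive (writable) Trunk state. The name below is illustrative.
object TLPermissionsExample {
  def grantIsWritable(param: UInt): Bool = param === TLPermissions.toT
}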
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
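// Illustrative sketch (hypothetical values, not upstream code): exercising the
// half-open [start, end) semantics of IdRange above.
object IdRangeExample {
  def main(args: Array[String]): Unit = {
    val lo = IdRange(0, 4)
    val hi = IdRange(4, 8)
    println(lo overlaps hi)                                // false: id 4 belongs only to 'hi'
    println(lo contains 3)                                 // true
    println(IdRange.overlaps(Seq(lo, hi, IdRange(6, 10)))) // Some(...): 6..10 collides with 4..8
  }
}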
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
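// Illustrative sketch (hypothetical values, not upstream code): the inclusive
// power-of-2 range algebra of TransferSizes above.
object TransferSizesExample {
  def main(args: Array[String]): Unit = {
    val clientXfer = TransferSizes(1, 64)
    val slaveXfer  = TransferSizes(4, 32)
    println(clientXfer.contains(16))               // true: 16 is a power of 2 within [1, 64]
    println(clientXfer intersect slaveXfer)        // TransferSizes[4, 32]
    println(TransferSizes.none mincover slaveXfer) // TransferSizes[4, 32]: none is the mincover identity
  }
}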
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
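// Illustrative sketch (hypothetical addresses, not upstream code): the base/mask
// matching and region arithmetic of AddressSet above.
object AddressSetExample {
  def main(args: Array[String]): Unit = {
    val dev = AddressSet(0x1000, 0xf0f)           // 0x1000-0x100f, 0x1100-0x110f, ...
    println(dev.contains(BigInt(0x110c)))         // true: bits outside the mask match the base
    println(dev.alignment)                        // 16: the manager must be 16-byte aligned
    println(AddressSet.misaligned(0x3000, 0x180)) // a non-power-of-2 region split into aligned sets
  }
}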
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
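// Illustrative sketch (hypothetical module, not upstream code): applying a BufferParams
// to a Decoupled stream instantiates the configured Queue, or passes the stream through
// untouched when the depth is zero.
class BufferParamsExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(DecoupledIO(UInt(8.W)))
    val out = DecoupledIO(UInt(8.W))
  })
  io.out <> BufferParams.default(io.in) // depth-2 Queue; BufferParams.none(io.in) would just be io.in
}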
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
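  // Worked example (editor's note): with lgSize = 3 the mask is 0x7, so isAligned
  // requires the low three address bits to be zero, i.e. 8-byte alignment.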
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
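  // Worked example (editor's note): with beatBytes = 8, a PutFullData of size = 5
  // (2^5 = 32 bytes) gives numBeats = 4 and numBeats1 = 3, while a Get of the same
  // size carries no data on channel A, so numBeats = 1 and numBeats1 = 0.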
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
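  // Worked example (editor's note): Get and a PREFETCH_READ Hint never need T permissions;
  // Put*, atomics, a PREFETCH_WRITE Hint, and Acquire{Block,Perm} with NtoT or BtoT do.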
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_59( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data = 64'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_43 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_49 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_29 = _source_ok_T_28 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_33 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_34 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_35 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_41 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_47 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_53 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_36 = _source_ok_T_35 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_42 = _source_ok_T_41 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_44 = _source_ok_T_42; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_46; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_48 = _source_ok_T_47 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_50 = _source_ok_T_48; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_52; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_54 = _source_ok_T_53 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_58; // @[Parameters.scala:1138:31]
wire _source_ok_T_59 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_59; // @[Parameters.scala:1138:31]
wire _source_ok_T_60 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire _source_ok_T_62 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_63 = _source_ok_T_62 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_64 = _source_ok_T_63 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_65 = _source_ok_T_64 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_67 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _T_975 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_975; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_975; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1043 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1043; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1043; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1043; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_908 = _T_975 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_908 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_908 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_908 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_908 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_908 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_954 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_954 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_923 = _T_1043 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_923 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_923 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_923 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1019 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1019 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1001 = _T_1043 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1001 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1001 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1001 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File FIFOFixer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.util.property
class TLFIFOFixer(policy: TLFIFOFixer.Policy = TLFIFOFixer.all)(implicit p: Parameters) extends LazyModule
{
private def fifoMap(seq: Seq[TLSlaveParameters]) = {
val (flatManagers, keepManagers) = seq.partition(policy)
// We need to be careful if one flatManager and one keepManager share an existing domain
// Erring on the side of caution, we will also flatten the keepManager in this case
val flatDomains = Set(flatManagers.flatMap(_.fifoId):_*) // => ID 0
val keepDomains = Set(keepManagers.flatMap(_.fifoId):_*) -- flatDomains // => IDs compacted
// Calculate what the FIFO domains look like after the fixer is applied
val flatMap = flatDomains.map { x => (x, 0) }.toMap
val keepMap = keepDomains.scanLeft((-1,0)) { case ((_,s),x) => (x, s+1) }.toMap
val map = flatMap ++ keepMap
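    // Worked example with hypothetical domain IDs: if the flattened managers cover existing FIFO
    // domains {2, 5} and the kept managers cover {3, 7}, then flatMap = Map(2 -> 0, 5 -> 0) and
    // keepMap = Map(-1 -> 0, 3 -> 1, 7 -> 2) (the (-1, 0) seed from scanLeft is harmless, since
    // real fifoIds are non-negative), so 'map' sends every flattened domain to 0 and keeps the
    // remaining domains distinct.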
val fixMap = seq.map { m => m.fifoId match {
case None => if (policy(m)) Some(0) else None
case Some(id) => Some(map(id)) // also flattens some who did not ask
} }
// Compress the FIFO domain space of those we are combining
val reMap = flatDomains.scanLeft((-1,-1)) { case ((_,s),x) => (x, s+1) }.toMap
val splatMap = seq.map { m => m.fifoId match {
case None => None
case Some(id) => reMap.lift(id)
} }
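    // reMap renumbers only the flattened domains (0, 1, 2, ...); splatMap records, for each
    // manager, which renumbered sub-domain it originally belonged to (None if it had no fifoId).
    // The Impl below uses splatMap to distinguish requests that now share combined domain 0 but
    // target different original FIFO domains, which is what forces a stall between them.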
(fixMap, splatMap)
}
val node = new AdapterNode(TLImp)(
{ cp => cp },
{ mp =>
val (fixMap, _) = fifoMap(mp.managers)
mp.v1copy(managers = (fixMap zip mp.managers) map { case (id, m) => m.v1copy(fifoId = id) })
}) with TLFormatNode {
override def circuitIdentity = edges.in.map(_.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).size).sum == 0
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val (fixMap, splatMap) = fifoMap(edgeOut.manager.managers)
// Do we need to serialize the request to this manager?
val a_notFIFO = edgeIn.manager.fastProperty(in.a.bits.address, _.fifoId != Some(0), (b:Boolean) => b.B)
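      // a_notFIFO is true when the addressed slave lies outside the combined FIFO domain (its
      // fifoId is not Some(0) after the remapping applied in 'node' above); such requests bypass
      // the stall logic entirely.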
// Compact the IDs of the cases we serialize
val compacted = ((fixMap zip splatMap) zip edgeOut.manager.managers) flatMap {
case ((f, s), m) => if (f == Some(0)) Some(m.v1copy(fifoId = s)) else None
}
val sinks = if (compacted.exists(_.supportsAcquireB)) edgeOut.manager.endSinkId else 0
val a_id = if (compacted.isEmpty) 0.U else
edgeOut.manager.v1copy(managers = compacted, endSinkId = sinks).findFifoIdFast(in.a.bits.address)
val a_noDomain = a_id === 0.U
if (false) {
println(s"FIFOFixer for: ${edgeIn.client.clients.map(_.name).mkString(", ")}")
println(s"make FIFO: ${edgeIn.manager.managers.filter(_.fifoId==Some(0)).map(_.name).mkString(", ")}")
println(s"not FIFO: ${edgeIn.manager.managers.filter(_.fifoId!=Some(0)).map(_.name).mkString(", ")}")
println(s"domains: ${compacted.groupBy(_.name).mapValues(_.map(_.fifoId))}")
println("")
}
// Count beats
val a_first = edgeIn.first(in.a)
val d_first = edgeOut.first(out.d) && out.d.bits.opcode =/= TLMessages.ReleaseAck
// Keep one bit for each source recording if there is an outstanding request that must be made FIFO
// Sources unused in the stall signal calculation should be pruned by DCE
val flight = RegInit(VecInit(Seq.fill(edgeIn.client.endSourceId) { false.B }))
when (a_first && in.a.fire) { flight(in.a.bits.source) := !a_notFIFO }
when (d_first && in.d.fire) { flight(in.d.bits.source) := false.B }
val stalls = edgeIn.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).map { c =>
val a_sel = c.sourceId.contains(in.a.bits.source)
val id = RegEnable(a_id, in.a.fire && a_sel && !a_notFIFO)
val track = flight.slice(c.sourceId.start, c.sourceId.end)
a_sel && a_first && track.reduce(_ || _) && (a_noDomain || id =/= a_id)
}
val stall = stalls.foldLeft(false.B)(_||_)
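      // Summary of the stall rule: for every client that asked for FIFO ordering and owns more
      // than one source ID, hold back the first beat of a new request while that client still has
      // FIFO requests in flight, unless the new request targets the same FIFO domain that was
      // recorded for the in-flight traffic. Non-FIFO requests (a_notFIFO) are never stalled.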
out.a <> in.a
in.d <> out.d
out.a.valid := in.a.valid && (a_notFIFO || !stall)
in.a.ready := out.a.ready && (a_notFIFO || !stall)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> out.b
out.c <> in .c
out.e <> in .e
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
      // Functional cover properties
      property.cover(in.a.valid && stall, "COVER FIFOFIXER STALL", "Cover: Stall occurred for a valid transaction")
val SourceIdFIFOed = RegInit(0.U(edgeIn.client.endSourceId.W))
val SourceIdSet = WireDefault(0.U(edgeIn.client.endSourceId.W))
val SourceIdClear = WireDefault(0.U(edgeIn.client.endSourceId.W))
when (a_first && in.a.fire && !a_notFIFO) {
SourceIdSet := UIntToOH(in.a.bits.source)
}
when (d_first && in.d.fire) {
SourceIdClear := UIntToOH(in.d.bits.source)
}
SourceIdFIFOed := SourceIdFIFOed | SourceIdSet
val allIDs_FIFOed = SourceIdFIFOed===Fill(SourceIdFIFOed.getWidth, 1.U)
property.cover(allIDs_FIFOed, "COVER all sources", "Cover: FIFOFIXER covers all Source IDs")
//property.cover(flight.reduce(_ && _), "COVER full", "Cover: FIFO is full with all Source IDs")
property.cover(!(flight.reduce(_ || _)), "COVER empty", "Cover: FIFO is empty")
property.cover(SourceIdSet > 0.U, "COVER at least one push", "Cover: At least one Source ID is pushed")
property.cover(SourceIdClear > 0.U, "COVER at least one pop", "Cover: At least one Source ID is popped")
}
}
}
object TLFIFOFixer
{
// Which slaves should have their FIFOness combined?
// NOTE: this transformation is still only applied for masters with requestFifo
type Policy = TLSlaveParameters => Boolean
import RegionType._
val all: Policy = m => true
val allFIFO: Policy = m => m.fifoId.isDefined
val allVolatile: Policy = m => m.regionType <= VOLATILE
def apply(policy: Policy = all)(implicit p: Parameters): TLNode =
{
val fixer = LazyModule(new TLFIFOFixer(policy))
fixer.node
}
}
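// Usage sketch (illustrative; the node names below are placeholders, not part of this file): a
// TLFIFOFixer is composed into a bus in front of an Xbar, with a Policy choosing which slaves get
// their FIFO domains combined (MemoryBus below uses TLFIFOFixer.all).
//
//   val fifoFixed = TLFIFOFixer(TLFIFOFixer.allVolatile) // serialize only volatile regions
//   xbar.node :*=* fifoFixed :*=* inwardAttachmentNode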
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
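// Usage sketch (illustrative; `manager` and `client` are placeholder nodes):
//
//   manager.node := TLBuffer() := client.node                                    // default 2-entry buffers on all channels
//   manager.node := TLBuffer(BufferParams.flow, BufferParams.pipe) := client.node // a/c/e: flow, b/d: pipe
//   val threeDeep = TLBuffer.chainNode(3, Some("mem_buffers"))                   // three buffers composed in series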
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
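    // Worked examples (comment sketch):
    //   (3.U).addWrap(4.U, 5)  // 3 +& 4 = 7, and 7 >= 5, so the result is 2
    //   (1.U).subWrap(3.U, 5)  // 1 -& 3 underflows, so n = 5 is added back: result is 3
    //   (3.U).addWrap(4.U, 8)  // power-of-2 n reduces to truncation: 7 mod 8 = 7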
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
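  // Worked examples (comment sketch): OH1 is a "thermometer" encoding in which the value k is
  // represented by its k low-order bits set.
  //   UIntToOH1(2.U, 4)       // => b0011
  //   OH1ToOH("b0011".U(4.W)) // => b0100 (the one-hot bit just above the thermometer)
  //   OH1ToUInt("b0011".U)    // => 2.U
  //   trailingZeros(8)        // => Some(3); trailingZeros(0) => None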
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
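  // Worked examples (comment sketch): starting from each set bit, leftOR propagates 1s toward
  // the MSB and rightOR propagates 1s toward the LSB.
  //   leftOR("b00100".U(5.W))   // => b11100
  //   rightOR("b00100".U(5.W))  // => b00111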
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
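  // Worked example (comment sketch): unlike Seq.groupBy, keys appear in order of first occurrence,
  // which keeps generated hardware deterministic from run to run.
  //   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)  // => Seq(1 -> List(1, 3), 0 -> List(2, 4))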
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File MemoryBus.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.subsystem
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.tilelink.{BuiltInDevices, HasBuiltInDeviceParams, BuiltInErrorDeviceParams, BuiltInZeroDeviceParams}
import freechips.rocketchip.tilelink.{
ReplicatedRegion, HasTLBusParams, HasRegionReplicatorParams, TLBusWrapper,
TLBusWrapperInstantiationLike, RegionReplicator, TLXbar, TLInwardNode,
TLOutwardNode, ProbePicker, TLEdge, TLFIFOFixer
}
import freechips.rocketchip.util.Location
/** Parameterization of the memory-side bus created for each memory channel */
case class MemoryBusParams(
beatBytes: Int,
blockBytes: Int,
dtsFrequency: Option[BigInt] = None,
zeroDevice: Option[BuiltInZeroDeviceParams] = None,
errorDevice: Option[BuiltInErrorDeviceParams] = None,
replication: Option[ReplicatedRegion] = None)
extends HasTLBusParams
with HasBuiltInDeviceParams
with HasRegionReplicatorParams
with TLBusWrapperInstantiationLike
{
def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): MemoryBus = {
val mbus = LazyModule(new MemoryBus(this, loc.name))
mbus.suggestName(loc.name)
context.tlBusWrapperLocationMap += (loc -> mbus)
mbus
}
}
/** Wrapper for creating TL nodes from a bus connected to the back of each mem channel */
class MemoryBus(params: MemoryBusParams, name: String = "memory_bus")(implicit p: Parameters)
extends TLBusWrapper(params, name)(p)
{
private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r)))
val prefixNode = replicator.map { r =>
r.prefix := addressPrefixNexusNode
addressPrefixNexusNode
}
private val xbar = LazyModule(new TLXbar(nameSuffix = Some(name))).suggestName(busName + "_xbar")
val inwardNode: TLInwardNode =
replicator.map(xbar.node :*=* TLFIFOFixer(TLFIFOFixer.all) :*=* _.node)
.getOrElse(xbar.node :*=* TLFIFOFixer(TLFIFOFixer.all))
val outwardNode: TLOutwardNode = ProbePicker() :*= xbar.node
def busView: TLEdge = xbar.node.edges.in.head
val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode)
}
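// Usage sketch (illustrative; `context` and the MBUS location are assumed to be provided by the
// surrounding subsystem, and the field values are placeholders):
//
//   val mbusParams = MemoryBusParams(beatBytes = 8, blockBytes = 64)
//   val mbus = mbusParams.instantiate(context, MBUS) // also registers itself in tlBusWrapperLocationMap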
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
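// Usage sketch (illustrative; `someClockSourceNode` is a placeholder): give a set of devices their
// own clock domain by instantiating a ClockSinkDomain and wiring its clockNode to a clock source.
// LazyModules created inside the domain are then clocked from clockBundle via childClock/childReset.
//
//   val periphDomain = LazyModule(new ClockSinkDomain(name = Some("periph")))
//   periphDomain.clockNode := someClockSourceNode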
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
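// Worked examples (comment sketch): adResponse maps an A-channel opcode to the D-channel opcode
// expected in response.
//   TLMessages.adResponse(TLMessages.Get)         // => AccessAckData
//   TLMessages.adResponse(TLMessages.PutFullData) // => AccessAck
//   TLMessages.isA(TLMessages.AcquirePerm)        // => true.B (7 is the largest A opcode)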
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent holds (or is on an outwards path to) a read-only copy of the data.
 * (N)one: the agent holds no permissions on (and no copy of) the data.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
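// Worked examples (comment sketch): an AcquireBlock requesting write permission carries the grow
// encoding NtoT; the matching Grant caps the requester at toT; a later Release of that block
// shrinks with TtoN.
//   TLPermissions.isGrow(TLPermissions.NtoT)    // => true.B
//   TLPermissions.isCap(TLPermissions.toT)      // => true.B
//   TLPermissions.isShrink(TLPermissions.TtoN)  // => true.B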
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
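// Worked examples (comment sketch):
//   TransferSizes(4, 64).intersect(TransferSizes(16, 256)) // => TransferSizes(16, 64)
//   TransferSizes(4, 8).mincover(TransferSizes(64, 128))   // => TransferSizes(4, 128), which also
//                                                          //    admits 16 and 32 (hence "not a union")
//   TransferSizes(4, 64).containsLg(5)                     // => true (a 32-byte transfer)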
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
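// Worked examples (comment sketch): misaligned decomposes an arbitrary (base, size) region into
// aligned power-of-2 AddressSets, and unify re-merges sets that differ only in a single bit.
//   AddressSet.misaligned(0x300, 0x200)
//     // => Seq(AddressSet(0x300, 0xff), AddressSet(0x400, 0xff))
//   AddressSet.unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff)))
//     // => Seq(AddressSet(0x0, 0x1ff))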
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
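// Worked examples (comment sketch; `io.enq` is a placeholder DecoupledIO):
//   BufferParams.default(io.enq) // Queue(io.enq, 2)
//   BufferParams.flow(io.enq)    // Queue(io.enq, 1, flow = true)
//   BufferParams.none(io.enq)    // pass-through, no queue inserted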
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
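// A minimal sketch of one possible resolveStar policy (an illustrative assumption of a simple
// pass-through style node, not the algorithm of any concrete subclass):
//
//   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
//     // Each unresolved star simply takes on the number of known bindings of the opposite side.
//     require(iStars <= 1 && oStars <= 1, s"$context: this sketch only handles one star per side")
//     (if (iStars > 0) oKnown else 0, if (oStars > 0) iKnown else 0)
//   }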
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
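// A sketch of the simplest parameter-mapping policy (illustrative assumption: a node where
// DI == DO and UO == UI that forwards parameters unchanged):
//
//   protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = p
//   protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] = p
//
// A real implementation must still return exactly `n` elements, which the identity mapping
// only does when the port counts on both sides already match.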
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
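// Worked example of the arithmetic above (binding counts assumed for illustration): if the
// flex set {a, b, c, d} from the Scaladoc example collectively has two :*= bindings and no
// :=* bindings, then allSink = 2, allSource = 0 and flexOffset = 2 - 0 = 2 >= 0, so every
// node in that set resolves its flex edges in the sink direction.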
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which still need to be resolved; that is, we must determine how many actual edges they correspond
* to. We also need to build up the ranges of edges which correspond to each binding operator, so that we can apply
* the correct edge parameters and later build up the correct bundle connections.
*
* - [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator).
* - [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort (binding
* operator).
* - [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`
* - [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Star resolution is delegated to the node subclass, which implements the algorithm in resolveStar.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
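// Worked example of the range bookkeeping above (resolved edge counts assumed for
// illustration): with three outward bindings resolving to 1, 2 and 1 edges respectively,
//   oSum         = Seq(1, 2, 1).scanLeft(0)(_ + _)  // Seq(0, 1, 3, 4)
//   oPortMapping = oSum.init.zip(oSum.tail)         // Seq((0, 1), (1, 3), (3, 4))
// so binding i owns the half-open edge range [start, end) given by oPortMapping(i).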
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the other node's [[InwardNode.iPortMapping]]. `n` Instance of
* the inward node on the other side of the binding. `p` View of [[Parameters]] where this connection was made. `s`
* Source info where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query the port index range that this binding occupies on the node at the other side
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method rather than [[in]]/[[out]], since those also create the hardware bundles.
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: For compatibility, unconnected forwarded diplomatic signals are currently tied off with DontCare.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: For compatibility, unconnected forwarded diplomatic signals are currently tied off with DontCare.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
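// Worked example (values assumed for illustration): with lgSize = 3.U, UIntToOH1 yields the
// mask 0b111, so isAligned holds exactly when the low three address bits are zero, i.e. the
// address is aligned to the 8-byte transfer size.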
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
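// Worked example (values assumed for illustration): with manager.beatBytes = 8 and a
// PutFullData of size = 5 (32 bytes), cutoff = 3 and decode = (1 << 5) >> 3 = 4, so
// numBeats = 4 (and numBeats1 below gives 3). A data-less message such as a Get request
// always yields numBeats = 1, since only its response carries beats of data.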
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
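// A typical usage sketch of the beat-tracking helpers above (signal names assumed):
//
//   val (a_first, a_last, a_done) = edgeOut.firstlast(out.a)
//   when (out.a.fire && a_first) { /* first beat of a possibly multi-beat A request */ }
//   when (a_done) { /* the final beat was accepted this cycle */ }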
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File ProbePicker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, IdRange}
/* A ProbePicker is used to unify multiple cache banks into one logical cache */
class ProbePicker(implicit p: Parameters) extends LazyModule
{
val node = TLAdapterNode(
clientFn = { p =>
// The ProbePicker assembles multiple clients based on the assumption that they are contiguous in the clients list.
// This should be true for clusters of xbar :=* BankBinder connections
def combine(next: TLMasterParameters, pair: (TLMasterParameters, Seq[TLMasterParameters])) = {
val (head, output) = pair
if (head.visibility.exists(x => next.visibility.exists(_.overlaps(x)))) {
(next, head +: output) // pair is not banked, push head without merging
} else {
def redact(x: TLMasterParameters) = x.v1copy(sourceId = IdRange(0,1), nodePath = Nil, visibility = Seq(AddressSet(0, ~0)))
require (redact(next) == redact(head), s"${redact(next)} != ${redact(head)}")
val merge = head.v1copy(
sourceId = IdRange(
head.sourceId.start min next.sourceId.start,
head.sourceId.end max next.sourceId.end),
visibility = AddressSet.unify(head.visibility ++ next.visibility))
(merge, output)
}
}
val myNil: Seq[TLMasterParameters] = Nil
val (head, output) = p.clients.init.foldRight((p.clients.last, myNil))(combine)
p.v1copy(clients = head +: output)
},
managerFn = { p => p })
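// Worked example of the clientFn fold above (client parameters assumed for illustration):
// two otherwise identical banked clients, one with sourceId = IdRange(0, 4) visible at
// AddressSet(0x0000, 0xfff) and the other with sourceId = IdRange(4, 8) visible at
// AddressSet(0x1000, 0xfff), have non-overlapping visibility and are therefore merged into
// a single logical client with sourceId = IdRange(0, 8) and visibility
// Seq(AddressSet(0x0000, 0x1fff)).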
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
// Based on address, adjust source to route to the correct bank
if (edgeIn.client.clients.size != edgeOut.client.clients.size) {
in.b.bits.source := Mux1H(
edgeOut.client.clients.map(_.sourceId contains out.b.bits.source),
edgeOut.client.clients.map { c =>
val banks = edgeIn.client.clients.filter(c.sourceId contains _.sourceId)
if (banks.size == 1) {
out.b.bits.source // allow sharing the value between single-bank cases
} else {
Mux1H(
banks.map(_.visibility.map(_ contains out.b.bits.address).reduce(_ || _)),
banks.map(_.sourceId.start.U))
}
}
)
}
}
}
}
object ProbePicker
{
def apply()(implicit p: Parameters): TLNode = {
val picker = LazyModule(new ProbePicker)
picker.node
}
}
File LazyScope.scala:
package org.chipsalliance.diplomacy.lazymodule
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.ValName
/** Allows dynamic creation of [[Module]] hierarchy and "shoving" logic into a [[LazyModule]]. */
trait LazyScope {
this: LazyModule =>
override def toString: String = s"LazyScope named $name"
/** Evaluate `body` in the current [[LazyModule.scope]] */
def apply[T](body: => T): T = {
// Preserve the previous value of the [[LazyModule.scope]], because when calling [[apply]] function,
// [[LazyModule.scope]] will be altered.
val saved = LazyModule.scope
// [[LazyModule.scope]] stack push.
LazyModule.scope = Some(this)
// Evaluate [[body]] in the current `scope`, saving the result to [[out]].
val out = body
// Check that the `scope` after evaluating `body` is the same as when we started.
require(LazyModule.scope.isDefined, s"LazyScope $name tried to exit, but scope was empty!")
require(
LazyModule.scope.get eq this,
s"LazyScope $name exited before LazyModule ${LazyModule.scope.get.name} was closed"
)
// [[LazyModule.scope]] stack pop.
LazyModule.scope = saved
out
}
}
/** Used to automatically create a level of module hierarchy (a [[SimpleLazyModule]]) within which [[LazyModule]]s can
* be instantiated and connected.
*
* It will instantiate a [[SimpleLazyModule]] to manage evaluation of `body` and evaluate `body` code snippets in this
* scope.
*/
object LazyScope {
/** Create a [[LazyScope]] with an implicit instance name.
*
* @param body
* code executed within the generated [[SimpleLazyModule]].
* @param valName
* instance name of generated [[SimpleLazyModule]].
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
body: => T
)(
implicit valName: ValName,
p: Parameters
): T = {
apply(valName.value, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicitly defined instance name.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String
)(body: => T
)(
implicit p: Parameters
): T = {
apply(name, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicit instance and class name, and control inlining.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param desiredModuleName
* class name of generated [[SimpleLazyModule]].
* @param overrideInlining
* tell FIRRTL that this [[SimpleLazyModule]]'s module should be inlined.
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String,
desiredModuleName: String,
overrideInlining: Option[Boolean] = None
)(body: => T
)(
implicit p: Parameters
): T = {
val scope = LazyModule(new SimpleLazyModule with LazyScope {
override lazy val desiredName = desiredModuleName
override def shouldBeInlined = overrideInlining.getOrElse(super.shouldBeInlined)
}).suggestName(name)
scope {
body
}
}
/** Create a [[LazyScope]] to temporarily group children for some reason, but tell Firrtl to inline it.
*
* For example, we might want to control a set of children's clocks but then not keep the parent wrapper.
*
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def inline[T](
body: => T
)(
implicit p: Parameters
): T = {
apply("noname", "ShouldBeInlined", Some(false))(body)(p)
}
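// A usage sketch (the instance and class names below are assumptions for illustration):
// grouping a crossbar into its own level of module hierarchy, and creating a temporary
// grouping via LazyScope.inline.
//
//   val grouped = LazyScope("subsystemXbarWrapper", "SubsystemXbarWrapper") {
//     val xbar = LazyModule(new TLXbar())
//     xbar.node
//   }
//   val transparent = LazyScope.inline { LazyModule(new TLXbar()).node }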
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
    ranges.sortBy(_._2).map(_._1) // Restore original order
}
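  // Worked example: endSourceId values Seq(3, 0, 5) round up to power-of-two block sizes
  // Seq(4, 0, 8); packing the blocks largest-first from offset 0 yields IdRange(8, 12),
  // IdRange(0, 0) and IdRange(0, 8) back in port order. Every non-empty range starts at a
  // multiple of its size, so the original source can later be recovered by dropping the
  // high bits (see 'trim' below) instead of subtracting an offset.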
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
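  // Worked example: every factory produced by relabeler() shares one counter but gets a
  // fresh map, so if one slave port's managers carry fifoId values 3, 3, 7 they become
  // 0, 0, 1, and a second port's values 2, 2 become 2, 2: distinct FIFO domains from
  // different ports never collide once the crossbar merges them.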
  def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master need connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
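    // In words: A and D requests follow plain address reachability; B additionally needs a
    // probe-capable master and a TRACKED slave region; C and E additionally need a
    // probe-capable master and a slave supporting AcquireB, since Release/GrantAck traffic
    // only exists where a cached copy could have been acquired.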
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
    // We need an intermediate bundle sized for the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
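    // e.g. an input port with four sources keeps d.bits.source(1, 0); a single-source port
    // collapses to a constant 0.U, sidestepping the empty-range extraction noted above.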
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
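      // AddressDecoder picks a minimal mask of address bits sufficient to tell the reachable
      // outputs apart; widening each AddressSet by the complement marks every other bit as
      // don't-care, so the per-output 'contains' checks built below stay small.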
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
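  // The replicated ports share a single handshake: valid is forwarded only to the selected
  // copy (unconditionally when there is exactly one output), and Mux1H reflects the selected
  // copy's ready back to the input, so each beat is consumed by at most one output. Forcing
  // fanout only changes where the request bits are physically duplicated, not this handshake.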
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
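// An additional minimal sketch (invented name, not part of the original test suite): two
// TLXbars composed in series between one fuzzer and two RAMs, reusing the same helpers as
// the tests above, to show that a crossbar's nexus node chains like any other TLNode.
class TLCascadedXbarSketch(txns: Int)(implicit p: Parameters) extends LazyModule {
  val fuzz  = LazyModule(new TLFuzzer(txns))
  val xbar1 = LazyModule(new TLXbar)
  val xbar2 = LazyModule(new TLXbar)
  xbar1.node := TLDelayer(0.1) := fuzz.node
  xbar2.node := TLDelayer(0.1) := xbar1.node
  (0 until 2) foreach { n =>
    val ram = LazyModule(new TLRAM(AddressSet(0x0 + 0x400 * n, 0x3ff)))
    ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar2.node
  }
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) with UnitTestModule {
    io.finished := fuzz.module.io.finished
  }
}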
| module MemoryBus( // @[ClockDomain.scala:14:9]
input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [27:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_mbus_clock_groups_in_member_mbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_mbus_clock_groups_in_member_mbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_bus_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_bus_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_bus_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_bus_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_bus_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_bus_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_bus_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_bus_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_bus_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire coupler_to_mbusscratchpad00_auto_tl_out_d_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [1:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_a_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_d_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_a_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire [7:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [27:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [4:0] xbar_in_0_d_bits_source; // @[Xbar.scala:159:18]
wire [4:0] xbar_in_0_a_bits_source; // @[Xbar.scala:159:18]
wire xbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9]
wire [4:0] xbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire [7:0] xbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire [4:0] xbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
wire buffer_auto_in_d_valid; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_ready; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9]
wire [4:0] buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire [1:0] buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_valid; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_ready; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire [7:0] buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [31:0] buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [4:0] buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire fixer_auto_anon_out_d_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_out_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_out_d_bits_source; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_auto_anon_out_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_d_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_in_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_in_d_bits_source; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_auto_anon_in_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_a_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_in_a_bits_data; // @[FIFOFixer.scala:50:9]
wire [7:0] fixer_auto_anon_in_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [31:0] fixer_auto_anon_in_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_in_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire mbus_clock_groups_auto_out_member_mbus_0_reset; // @[ClockGroup.scala:53:9]
wire mbus_clock_groups_auto_out_member_mbus_0_clock; // @[ClockGroup.scala:53:9]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [4:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt; // @[LazyScope.scala:98:27]
wire _picker_auto_in_1_a_ready; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_1_d_bits_opcode; // @[ProbePicker.scala:69:28]
wire [1:0] _picker_auto_in_1_d_bits_param; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_1_d_bits_size; // @[ProbePicker.scala:69:28]
wire [4:0] _picker_auto_in_1_d_bits_source; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_bits_sink; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_bits_denied; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_in_1_d_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_a_ready; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_d_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_0_d_bits_opcode; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_0_d_bits_size; // @[ProbePicker.scala:69:28]
wire [4:0] _picker_auto_in_0_d_bits_source; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_d_bits_denied; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_in_0_d_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_d_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_0_a_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_0_a_bits_opcode; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_0_a_bits_param; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_0_a_bits_size; // @[ProbePicker.scala:69:28]
wire [4:0] _picker_auto_out_0_a_bits_source; // @[ProbePicker.scala:69:28]
wire [31:0] _picker_auto_out_0_a_bits_address; // @[ProbePicker.scala:69:28]
wire [7:0] _picker_auto_out_0_a_bits_mask; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_out_0_a_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_0_a_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_0_d_ready; // @[ProbePicker.scala:69:28]
wire _mbus_xbar_auto_anon_out_1_a_valid; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_opcode; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_param; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_size; // @[MemoryBus.scala:47:32]
wire [4:0] _mbus_xbar_auto_anon_out_1_a_bits_source; // @[MemoryBus.scala:47:32]
wire [27:0] _mbus_xbar_auto_anon_out_1_a_bits_address; // @[MemoryBus.scala:47:32]
wire [7:0] _mbus_xbar_auto_anon_out_1_a_bits_mask; // @[MemoryBus.scala:47:32]
wire [63:0] _mbus_xbar_auto_anon_out_1_a_bits_data; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_1_a_bits_corrupt; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_1_d_ready; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_0_a_valid; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_opcode; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_param; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_size; // @[MemoryBus.scala:47:32]
wire [4:0] _mbus_xbar_auto_anon_out_0_a_bits_source; // @[MemoryBus.scala:47:32]
wire [31:0] _mbus_xbar_auto_anon_out_0_a_bits_address; // @[MemoryBus.scala:47:32]
wire [7:0] _mbus_xbar_auto_anon_out_0_a_bits_mask; // @[MemoryBus.scala:47:32]
wire [63:0] _mbus_xbar_auto_anon_out_0_a_bits_data; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_0_a_bits_corrupt; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_0_d_ready; // @[MemoryBus.scala:47:32]
wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[ClockDomain.scala:14:9]
wire [2:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[ClockDomain.scala:14:9]
wire [4:0] auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last; // @[ClockDomain.scala:14:9]
wire auto_mbus_clock_groups_in_member_mbus_0_clock_0 = auto_mbus_clock_groups_in_member_mbus_0_clock; // @[ClockDomain.scala:14:9]
wire auto_mbus_clock_groups_in_member_mbus_0_reset_0 = auto_mbus_clock_groups_in_member_mbus_0_reset; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_a_valid_0 = auto_bus_xing_in_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_bus_xing_in_a_bits_opcode_0 = auto_bus_xing_in_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_bus_xing_in_a_bits_param_0 = auto_bus_xing_in_a_bits_param; // @[ClockDomain.scala:14:9]
wire [2:0] auto_bus_xing_in_a_bits_size_0 = auto_bus_xing_in_a_bits_size; // @[ClockDomain.scala:14:9]
wire [4:0] auto_bus_xing_in_a_bits_source_0 = auto_bus_xing_in_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] auto_bus_xing_in_a_bits_address_0 = auto_bus_xing_in_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] auto_bus_xing_in_a_bits_mask_0 = auto_bus_xing_in_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] auto_bus_xing_in_a_bits_data_0 = auto_bus_xing_in_a_bits_data; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_a_bits_corrupt_0 = auto_bus_xing_in_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_d_ready_0 = auto_bus_xing_in_d_ready; // @[ClockDomain.scala:14:9]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire mbus_clock_groups_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire mbus_clock_groups_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire mbus_clock_groups__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire clockGroup_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire clockGroup_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire clockGroup__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire fixer__flight_WIRE_0 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_1 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_2 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_3 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_4 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_5 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_6 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_7 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_8 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_9 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_10 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_11 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_12 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_13 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_14 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_15 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_16 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_17 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_18 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_19 = 1'h0; // @[FIFOFixer.scala:79:35]
wire xbar_auto_anon_in_d_bits_sink = 1'h0; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_bits_sink = 1'h0; // @[Xbar.scala:74:9]
wire xbar_anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire xbar_anonIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire xbar_in_0_d_bits_sink = 1'h0; // @[Xbar.scala:159:18]
wire xbar_out_0_d_bits_sink = 1'h0; // @[Xbar.scala:216:19]
wire xbar__out_0_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53]
wire xbar__addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire xbar__addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire xbar__addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire xbar__addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire xbar__addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire xbar__addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire xbar__requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire xbar__requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire xbar__requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire xbar__requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire xbar__requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire xbar__requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire xbar__requestBOI_T = 1'h0; // @[Parameters.scala:54:10]
wire xbar__requestDOI_T = 1'h0; // @[Parameters.scala:54:10]
wire xbar__requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire xbar__requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire xbar__requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire xbar__requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire xbar__requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire xbar__requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire xbar__beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire xbar__beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire xbar__beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire xbar__beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire xbar__beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire xbar__beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire xbar__beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37]
wire xbar__beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire xbar__beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire xbar__beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire xbar__beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire xbar__beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire xbar__beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire xbar_beatsCI_opdata = 1'h0; // @[Edges.scala:102:36]
wire xbar__beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire xbar__beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire xbar__beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire xbar__beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire xbar__beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire xbar__beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire xbar__portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire xbar__portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire xbar__portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire xbar__portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire xbar__portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire xbar__portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire xbar_portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire xbar__portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire xbar__portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire xbar__portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire xbar__portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire xbar__portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire xbar__portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire xbar__portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire xbar_portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire xbar__portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire xbar_portsDIO_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire xbar__portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire xbar__portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire xbar__portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire xbar__portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire xbar__portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire xbar__portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire xbar_portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire xbar_portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire xbar__portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire [1:0] xbar_auto_anon_in_d_bits_param = 2'h0; // @[Xbar.scala:74:9]
wire [1:0] xbar_auto_anon_out_d_bits_param = 2'h0; // @[Xbar.scala:74:9]
wire [1:0] xbar_anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire [1:0] xbar_anonIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] xbar_in_0_d_bits_param = 2'h0; // @[Xbar.scala:159:18]
wire [1:0] xbar_out_0_d_bits_param = 2'h0; // @[Xbar.scala:216:19]
wire [1:0] xbar__requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] xbar__requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] xbar__beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] xbar__beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] xbar__portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] xbar__portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] xbar_portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] xbar_portsDIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire fixer__a_notFIFO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire fixer__flight_T = 1'h1; // @[FIFOFixer.scala:80:65]
wire fixer__anonOut_a_valid_T = 1'h1; // @[FIFOFixer.scala:95:50]
wire fixer__anonOut_a_valid_T_1 = 1'h1; // @[FIFOFixer.scala:95:47]
wire fixer__anonIn_a_ready_T = 1'h1; // @[FIFOFixer.scala:96:50]
wire fixer__anonIn_a_ready_T_1 = 1'h1; // @[FIFOFixer.scala:96:47]
wire xbar__requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire xbar_requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107]
wire xbar__requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire xbar_requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire xbar__requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire xbar__requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire xbar__requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire xbar__requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire xbar_requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire xbar__requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire xbar__requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire xbar__requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire xbar__requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire xbar_requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire xbar_beatsBO_opdata = 1'h1; // @[Edges.scala:97:28]
wire xbar__portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire xbar__portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire [63:0] xbar__addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] xbar__addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] xbar__requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] xbar__requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] xbar__beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] xbar__beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] xbar__beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] xbar__beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] xbar__portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] xbar__portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] xbar_portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] xbar__portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] xbar__portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] xbar_portsCOI_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [31:0] xbar__addressC_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] xbar__addressC_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] xbar__requestCIO_T = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] xbar__requestBOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] xbar__requestBOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] xbar__beatsBO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] xbar__beatsBO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] xbar__beatsCI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] xbar__beatsCI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] xbar__portsBIO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] xbar__portsBIO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] xbar_portsBIO_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] xbar__portsCOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] xbar__portsCOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] xbar_portsCOI_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [4:0] xbar__addressC_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] xbar__addressC_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] xbar__requestBOI_WIRE_bits_source = 5'h0; // @[Bundles.scala:264:74]
wire [4:0] xbar__requestBOI_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:264:61]
wire [4:0] xbar__requestBOI_uncommonBits_T = 5'h0; // @[Parameters.scala:52:29]
wire [4:0] xbar_requestBOI_uncommonBits = 5'h0; // @[Parameters.scala:52:56]
wire [4:0] xbar__beatsBO_WIRE_bits_source = 5'h0; // @[Bundles.scala:264:74]
wire [4:0] xbar__beatsBO_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:264:61]
wire [4:0] xbar__beatsCI_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] xbar__beatsCI_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] xbar__portsBIO_WIRE_bits_source = 5'h0; // @[Bundles.scala:264:74]
wire [4:0] xbar__portsBIO_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:264:61]
wire [4:0] xbar_portsBIO_filtered_0_bits_source = 5'h0; // @[Xbar.scala:352:24]
wire [4:0] xbar__portsCOI_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] xbar__portsCOI_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] xbar_portsCOI_filtered_0_bits_source = 5'h0; // @[Xbar.scala:352:24]
wire [2:0] xbar__addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__addressC_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__addressC_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] xbar__requestBOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] xbar__requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] xbar__requestBOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] xbar__beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] xbar__beatsBO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] xbar__beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] xbar__beatsBO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] xbar_beatsBO_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] xbar_beatsBO_0 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] xbar__beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__beatsCI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__beatsCI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar_beatsCI_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] xbar_beatsCI_0 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] xbar__portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] xbar__portsBIO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] xbar__portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] xbar__portsBIO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] xbar_portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] xbar_portsBIO_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] xbar__portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__portsCOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] xbar__portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar__portsCOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] xbar_portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] xbar_portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] xbar_portsCOI_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24]
wire [7:0] xbar__requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] xbar__requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] xbar__beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] xbar__beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] xbar__portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] xbar__portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] xbar_portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24]
wire [5:0] xbar__beatsBO_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] xbar__beatsCI_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] xbar__beatsBO_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [5:0] xbar__beatsCI_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] xbar__beatsBO_decode_T = 13'h3F; // @[package.scala:243:71]
wire [12:0] xbar__beatsCI_decode_T = 13'h3F; // @[package.scala:243:71]
wire [19:0] fixer__allIDs_FIFOed_T = 20'hFFFFF; // @[FIFOFixer.scala:127:48]
wire [32:0] fixer__a_notFIFO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_notFIFO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestCIO_T_1 = 33'h0; // @[Parameters.scala:137:41]
wire [32:0] xbar__requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] xbar__requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire mbus_clock_groups_auto_in_member_mbus_0_clock = auto_mbus_clock_groups_in_member_mbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire mbus_clock_groups_auto_in_member_mbus_0_reset = auto_mbus_clock_groups_in_member_mbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire bus_xingIn_a_ready; // @[MixedNode.scala:551:17]
wire bus_xingIn_a_valid = auto_bus_xing_in_a_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] bus_xingIn_a_bits_opcode = auto_bus_xing_in_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] bus_xingIn_a_bits_param = auto_bus_xing_in_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [2:0] bus_xingIn_a_bits_size = auto_bus_xing_in_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] bus_xingIn_a_bits_source = auto_bus_xing_in_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] bus_xingIn_a_bits_address = auto_bus_xing_in_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] bus_xingIn_a_bits_mask = auto_bus_xing_in_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] bus_xingIn_a_bits_data = auto_bus_xing_in_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire bus_xingIn_a_bits_corrupt = auto_bus_xing_in_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire bus_xingIn_d_ready = auto_bus_xing_in_d_ready_0; // @[ClockDomain.scala:14:9]
wire bus_xingIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] bus_xingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] bus_xingIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] bus_xingIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] bus_xingIn_d_bits_source; // @[MixedNode.scala:551:17]
wire bus_xingIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire bus_xingIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] bus_xingIn_d_bits_data; // @[MixedNode.scala:551:17]
wire bus_xingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_buffer_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_buffer_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] auto_buffer_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [27:0] auto_buffer_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_buffer_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_buffer_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_a_valid_0; // @[ClockDomain.scala:14:9]
wire auto_buffer_out_d_ready_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready_0; // @[ClockDomain.scala:14:9]
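// Fixed-clock-node clock/reset outputs, the response/handshake wires returned
// on the bus-crossing input port (auto_bus_xing_in_*), and the clock sink that
// drives this domain's childClock/childReset.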
wire auto_fixedClockNode_anon_out_1_clock_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_1_reset_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_0_clock_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_0_reset_0; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_bus_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_bus_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_bus_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] auto_bus_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_bus_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_bus_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
wire clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
wire clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
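// Clock-group plumbing: the incoming mbus clock/reset member is forwarded
// unchanged through mbus_clock_groups and clockGroup; the assigns below are
// pure pass-throughs between diplomatic node boundaries.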
wire mbus_clock_groups_nodeIn_member_mbus_0_clock = mbus_clock_groups_auto_in_member_mbus_0_clock; // @[ClockGroup.scala:53:9]
wire mbus_clock_groups_nodeOut_member_mbus_0_clock; // @[MixedNode.scala:542:17]
wire mbus_clock_groups_nodeIn_member_mbus_0_reset = mbus_clock_groups_auto_in_member_mbus_0_reset; // @[ClockGroup.scala:53:9]
wire mbus_clock_groups_nodeOut_member_mbus_0_reset; // @[MixedNode.scala:542:17]
wire clockGroup_auto_in_member_mbus_0_clock = mbus_clock_groups_auto_out_member_mbus_0_clock; // @[ClockGroup.scala:24:9, :53:9]
wire clockGroup_auto_in_member_mbus_0_reset = mbus_clock_groups_auto_out_member_mbus_0_reset; // @[ClockGroup.scala:24:9, :53:9]
assign mbus_clock_groups_auto_out_member_mbus_0_clock = mbus_clock_groups_nodeOut_member_mbus_0_clock; // @[ClockGroup.scala:53:9]
assign mbus_clock_groups_auto_out_member_mbus_0_reset = mbus_clock_groups_nodeOut_member_mbus_0_reset; // @[ClockGroup.scala:53:9]
assign mbus_clock_groups_nodeOut_member_mbus_0_clock = mbus_clock_groups_nodeIn_member_mbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign mbus_clock_groups_nodeOut_member_mbus_0_reset = mbus_clock_groups_nodeIn_member_mbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
wire clockGroup_nodeIn_member_mbus_0_clock = clockGroup_auto_in_member_mbus_0_clock; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_clock; // @[MixedNode.scala:542:17]
wire clockGroup_nodeIn_member_mbus_0_reset = clockGroup_auto_in_member_mbus_0_reset; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_reset; // @[MixedNode.scala:542:17]
wire clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9]
wire clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9]
assign clockGroup_auto_out_clock = clockGroup_nodeOut_clock; // @[ClockGroup.scala:24:9]
assign clockGroup_auto_out_reset = clockGroup_nodeOut_reset; // @[ClockGroup.scala:24:9]
assign clockGroup_nodeOut_clock = clockGroup_nodeIn_member_mbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockGroup_nodeOut_reset = clockGroup_nodeIn_member_mbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
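// TLFIFOFixer boundary wiring (FIFOFixer.scala:50:9): anonIn mirrors the
// buffered request stream coming out of the TLBuffer, anonOut forwards it
// downstream. Most of the assigns in this block are straight pass-throughs of
// the A- and D-channel fields; the fixer-specific logic (address decode, beat
// tracking, in-flight bookkeeping) follows further down.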
wire fixer_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire buffer_auto_out_a_ready = fixer_auto_anon_in_a_ready; // @[FIFOFixer.scala:50:9]
wire buffer_auto_out_a_valid; // @[Buffer.scala:40:9]
wire fixer_anonIn_a_valid = fixer_auto_anon_in_a_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] fixer_anonIn_a_bits_opcode = fixer_auto_anon_in_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] fixer_anonIn_a_bits_param = fixer_auto_anon_in_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9]
wire [2:0] fixer_anonIn_a_bits_size = fixer_auto_anon_in_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9]
wire [4:0] fixer_anonIn_a_bits_source = fixer_auto_anon_in_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9]
wire [31:0] fixer_anonIn_a_bits_address = fixer_auto_anon_in_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9]
wire [7:0] fixer_anonIn_a_bits_mask = fixer_auto_anon_in_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9]
wire [63:0] fixer_anonIn_a_bits_data = fixer_auto_anon_in_a_bits_data; // @[FIFOFixer.scala:50:9]
wire buffer_auto_out_a_bits_corrupt; // @[Buffer.scala:40:9]
wire fixer_anonIn_a_bits_corrupt = fixer_auto_anon_in_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire buffer_auto_out_d_ready; // @[Buffer.scala:40:9]
wire fixer_anonIn_d_ready = fixer_auto_anon_in_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] fixer_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire buffer_auto_out_d_valid = fixer_auto_anon_in_d_valid; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] buffer_auto_out_d_bits_opcode = fixer_auto_anon_in_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] buffer_auto_out_d_bits_param = fixer_auto_anon_in_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] buffer_auto_out_d_bits_size = fixer_auto_anon_in_d_bits_size; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [4:0] buffer_auto_out_d_bits_source = fixer_auto_anon_in_d_bits_source; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire buffer_auto_out_d_bits_sink = fixer_auto_anon_in_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire buffer_auto_out_d_bits_denied = fixer_auto_anon_in_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] buffer_auto_out_d_bits_data = fixer_auto_anon_in_d_bits_data; // @[FIFOFixer.scala:50:9]
wire buffer_auto_out_d_bits_corrupt = fixer_auto_anon_in_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_a_ready = fixer_auto_anon_out_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] fixer_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] fixer_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] fixer_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] fixer_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] fixer_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] fixer_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] fixer_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire fixer_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire fixer_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire fixer_anonOut_d_valid = fixer_auto_anon_out_d_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_anonOut_d_bits_opcode = fixer_auto_anon_out_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_anonOut_d_bits_param = fixer_auto_anon_out_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_anonOut_d_bits_size = fixer_auto_anon_out_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_anonOut_d_bits_source = fixer_auto_anon_out_d_bits_source; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_d_bits_sink = fixer_auto_anon_out_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_d_bits_denied = fixer_auto_anon_out_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_anonOut_d_bits_data = fixer_auto_anon_out_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_d_bits_corrupt = fixer_auto_anon_out_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_out_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] fixer_auto_anon_out_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] fixer_auto_anon_out_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_out_a_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_a_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer__anonOut_a_valid_T_2; // @[FIFOFixer.scala:95:33]
wire fixer__anonIn_a_ready_T_2 = fixer_anonOut_a_ready; // @[FIFOFixer.scala:96:33]
assign fixer_auto_anon_out_a_valid = fixer_anonOut_a_valid; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_opcode = fixer_anonOut_a_bits_opcode; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_param = fixer_anonOut_a_bits_param; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_size = fixer_anonOut_a_bits_size; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_source = fixer_anonOut_a_bits_source; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_address = fixer_anonOut_a_bits_address; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_mask = fixer_anonOut_a_bits_mask; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_data = fixer_anonOut_a_bits_data; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_a_bits_corrupt = fixer_anonOut_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_d_ready = fixer_anonOut_d_ready; // @[FIFOFixer.scala:50:9]
assign fixer_anonIn_d_valid = fixer_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_opcode = fixer_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_param = fixer_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_size = fixer_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_source = fixer_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_sink = fixer_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_denied = fixer_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_data = fixer_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_corrupt = fixer_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign fixer_auto_anon_in_a_ready = fixer_anonIn_a_ready; // @[FIFOFixer.scala:50:9]
assign fixer__anonOut_a_valid_T_2 = fixer_anonIn_a_valid; // @[FIFOFixer.scala:95:33]
assign fixer_anonOut_a_bits_opcode = fixer_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_param = fixer_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_size = fixer_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_source = fixer_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_address = fixer_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] fixer__a_notFIFO_T = fixer_anonIn_a_bits_address; // @[Parameters.scala:137:31]
wire [31:0] fixer__a_id_T = fixer_anonIn_a_bits_address; // @[Parameters.scala:137:31]
assign fixer_anonOut_a_bits_mask = fixer_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_data = fixer_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_corrupt = fixer_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_d_ready = fixer_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign fixer_auto_anon_in_d_valid = fixer_anonIn_d_valid; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_opcode = fixer_anonIn_d_bits_opcode; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_param = fixer_anonIn_d_bits_param; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_size = fixer_anonIn_d_bits_size; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_source = fixer_anonIn_d_bits_source; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_sink = fixer_anonIn_d_bits_sink; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_denied = fixer_anonIn_d_bits_denied; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_data = fixer_anonIn_d_bits_data; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_d_bits_corrupt = fixer_anonIn_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [32:0] fixer__a_notFIFO_T_1 = {1'h0, fixer__a_notFIFO_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] fixer__a_id_T_1 = {1'h0, fixer__a_id_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] fixer__a_id_T_2 = fixer__a_id_T_1 & 33'h80000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] fixer__a_id_T_3 = fixer__a_id_T_2; // @[Parameters.scala:137:46]
wire fixer__a_id_T_4 = fixer__a_id_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] fixer__a_id_T_5 = fixer_anonIn_a_bits_address ^ 32'h80000000; // @[Parameters.scala:137:31]
wire [32:0] fixer__a_id_T_6 = {1'h0, fixer__a_id_T_5}; // @[Parameters.scala:137:{31,41}]
wire [32:0] fixer__a_id_T_7 = fixer__a_id_T_6 & 33'h80000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] fixer__a_id_T_8 = fixer__a_id_T_7; // @[Parameters.scala:137:46]
wire fixer__a_id_T_9 = fixer__a_id_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire fixer__a_id_T_11 = fixer__a_id_T_9; // @[Mux.scala:30:73]
wire [1:0] fixer__a_id_T_10 = {fixer__a_id_T_4, 1'h0}; // @[Mux.scala:30:73]
wire [1:0] fixer__a_id_T_12 = {fixer__a_id_T_10[1], fixer__a_id_T_10[0] | fixer__a_id_T_11}; // @[Mux.scala:30:73]
wire [1:0] fixer_a_id = fixer__a_id_T_12; // @[Mux.scala:30:73]
wire fixer_a_noDomain = fixer_a_id == 2'h0; // @[Mux.scala:30:73]
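// Burst/beat tracking for the fixer: a_first_*/d_first_* decode the
// beats-per-message minus one from the size field ((~(6'h3F << size))[5:3],
// zero when the opcode carries no data) and count down per beat, so that
// a_first/d_first flag the first beat of each A request and D response.
// d_first additionally excludes ReleaseAck (opcode 3'h6), per
// FIFOFixer.scala:75.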
wire fixer__a_first_T = fixer_anonIn_a_ready & fixer_anonIn_a_valid; // @[Decoupled.scala:51:35]
wire [12:0] fixer__a_first_beats1_decode_T = 13'h3F << fixer_anonIn_a_bits_size; // @[package.scala:243:71]
wire [5:0] fixer__a_first_beats1_decode_T_1 = fixer__a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] fixer__a_first_beats1_decode_T_2 = ~fixer__a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] fixer_a_first_beats1_decode = fixer__a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire fixer__a_first_beats1_opdata_T = fixer_anonIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire fixer_a_first_beats1_opdata = ~fixer__a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] fixer_a_first_beats1 = fixer_a_first_beats1_opdata ? fixer_a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] fixer_a_first_counter; // @[Edges.scala:229:27]
wire [3:0] fixer__a_first_counter1_T = {1'h0, fixer_a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] fixer_a_first_counter1 = fixer__a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire fixer_a_first = fixer_a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire fixer__a_first_last_T = fixer_a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire fixer__a_first_last_T_1 = fixer_a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire fixer_a_first_last = fixer__a_first_last_T | fixer__a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire fixer_a_first_done = fixer_a_first_last & fixer__a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] fixer__a_first_count_T = ~fixer_a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] fixer_a_first_count = fixer_a_first_beats1 & fixer__a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] fixer__a_first_counter_T = fixer_a_first ? fixer_a_first_beats1 : fixer_a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire fixer__d_first_T = fixer_anonOut_d_ready & fixer_anonOut_d_valid; // @[Decoupled.scala:51:35]
wire [12:0] fixer__d_first_beats1_decode_T = 13'h3F << fixer_anonOut_d_bits_size; // @[package.scala:243:71]
wire [5:0] fixer__d_first_beats1_decode_T_1 = fixer__d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] fixer__d_first_beats1_decode_T_2 = ~fixer__d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] fixer_d_first_beats1_decode = fixer__d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire fixer_d_first_beats1_opdata = fixer_anonOut_d_bits_opcode[0]; // @[Edges.scala:106:36]
wire [2:0] fixer_d_first_beats1 = fixer_d_first_beats1_opdata ? fixer_d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] fixer_d_first_counter; // @[Edges.scala:229:27]
wire [3:0] fixer__d_first_counter1_T = {1'h0, fixer_d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] fixer_d_first_counter1 = fixer__d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire fixer_d_first_first = fixer_d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire fixer__d_first_last_T = fixer_d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire fixer__d_first_last_T_1 = fixer_d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire fixer_d_first_last = fixer__d_first_last_T | fixer__d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire fixer_d_first_done = fixer_d_first_last & fixer__d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] fixer__d_first_count_T = ~fixer_d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] fixer_d_first_count = fixer_d_first_beats1 & fixer__d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] fixer__d_first_counter_T = fixer_d_first_first ? fixer_d_first_beats1 : fixer_d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire fixer__d_first_T_1 = fixer_anonOut_d_bits_opcode != 3'h6; // @[FIFOFixer.scala:75:63]
wire fixer_d_first = fixer_d_first_first & fixer__d_first_T_1; // @[FIFOFixer.scala:75:{42,63}]
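// Per-source in-flight tracking (FIFOFixer.scala:79): one 'flight' bit per
// TileLink source ID (20 sources here). Per the FIFOFixer source these mark
// sources with an outstanding request; the set/clear logic is emitted later in
// the module and is not visible in this block.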
reg fixer_flight_0; // @[FIFOFixer.scala:79:27]
reg fixer_flight_1; // @[FIFOFixer.scala:79:27]
reg fixer_flight_2; // @[FIFOFixer.scala:79:27]
reg fixer_flight_3; // @[FIFOFixer.scala:79:27]
reg fixer_flight_4; // @[FIFOFixer.scala:79:27]
reg fixer_flight_5; // @[FIFOFixer.scala:79:27]
reg fixer_flight_6; // @[FIFOFixer.scala:79:27]
reg fixer_flight_7; // @[FIFOFixer.scala:79:27]
reg fixer_flight_8; // @[FIFOFixer.scala:79:27]
reg fixer_flight_9; // @[FIFOFixer.scala:79:27]
reg fixer_flight_10; // @[FIFOFixer.scala:79:27]
reg fixer_flight_11; // @[FIFOFixer.scala:79:27]
reg fixer_flight_12; // @[FIFOFixer.scala:79:27]
reg fixer_flight_13; // @[FIFOFixer.scala:79:27]
reg fixer_flight_14; // @[FIFOFixer.scala:79:27]
reg fixer_flight_15; // @[FIFOFixer.scala:79:27]
reg fixer_flight_16; // @[FIFOFixer.scala:79:27]
reg fixer_flight_17; // @[FIFOFixer.scala:79:27]
reg fixer_flight_18; // @[FIFOFixer.scala:79:27]
reg fixer_flight_19; // @[FIFOFixer.scala:79:27]
wire fixer__T_2 = fixer_anonIn_d_ready & fixer_anonIn_d_valid; // @[Decoupled.scala:51:35]
assign fixer_anonOut_a_valid = fixer__anonOut_a_valid_T_2; // @[FIFOFixer.scala:95:33]
assign fixer_anonIn_a_ready = fixer__anonIn_a_ready_T_2; // @[FIFOFixer.scala:96:33]
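// Coverage bookkeeping: SourceIdSet / SourceIdClear one-hot encode the source
// of the current first A beat and first D beat, SourceIdFIFOed accumulates
// which source IDs have been observed, and allIDs_FIFOed asserts once all 20
// have been seen.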
reg [19:0] fixer_SourceIdFIFOed; // @[FIFOFixer.scala:115:35]
wire [19:0] fixer_SourceIdSet; // @[FIFOFixer.scala:116:36]
wire [19:0] fixer_SourceIdClear; // @[FIFOFixer.scala:117:38]
wire [31:0] fixer__SourceIdSet_T = 32'h1 << fixer_anonIn_a_bits_source; // @[OneHot.scala:58:35]
assign fixer_SourceIdSet = fixer_a_first & fixer__a_first_T ? fixer__SourceIdSet_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [31:0] fixer__SourceIdClear_T = 32'h1 << fixer_anonIn_d_bits_source; // @[OneHot.scala:58:35]
assign fixer_SourceIdClear = fixer_d_first & fixer__T_2 ? fixer__SourceIdClear_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [19:0] fixer__SourceIdFIFOed_T = fixer_SourceIdFIFOed | fixer_SourceIdSet; // @[FIFOFixer.scala:115:35, :116:36, :126:40]
wire fixer_allIDs_FIFOed = &fixer_SourceIdFIFOed; // @[FIFOFixer.scala:115:35, :127:41]
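// TLBuffer boundary wiring (Buffer.scala:40:9): in this configuration the
// buffer is a pure pass-through; nodeOut is assigned directly from nodeIn on
// the A channel and nodeIn from nodeOut on the D channel, with the
// bus_xingOut aliases riding on the same auto_in port.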
wire buffer_nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire bus_xingOut_a_ready = buffer_auto_in_a_ready; // @[Buffer.scala:40:9]
wire bus_xingOut_a_valid; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_valid = buffer_auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] buffer_nodeIn_a_bits_opcode = buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] buffer_nodeIn_a_bits_param = buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] buffer_nodeIn_a_bits_size = buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [4:0] bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [4:0] buffer_nodeIn_a_bits_source = buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] buffer_nodeIn_a_bits_address = buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [7:0] buffer_nodeIn_a_bits_mask = buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [63:0] buffer_nodeIn_a_bits_data = buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_corrupt = buffer_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire bus_xingOut_d_ready; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_d_ready = buffer_auto_in_d_ready; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] buffer_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire bus_xingOut_d_valid = buffer_auto_in_d_valid; // @[Buffer.scala:40:9]
wire [1:0] buffer_nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] bus_xingOut_d_bits_opcode = buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] bus_xingOut_d_bits_param = buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9]
wire [4:0] buffer_nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] bus_xingOut_d_bits_size = buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [4:0] bus_xingOut_d_bits_source = buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire bus_xingOut_d_bits_sink = buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9]
wire [63:0] buffer_nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire bus_xingOut_d_bits_denied = buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] bus_xingOut_d_bits_data = buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire bus_xingOut_d_bits_corrupt = buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_ready = buffer_auto_out_a_ready; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_valid; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_valid = buffer_auto_out_a_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] buffer_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_opcode = buffer_auto_out_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] buffer_nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_param = buffer_auto_out_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] buffer_nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_size = buffer_auto_out_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] buffer_nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_source = buffer_auto_out_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] buffer_nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_address = buffer_auto_out_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] buffer_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_mask = buffer_auto_out_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] buffer_nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_data = buffer_auto_out_a_bits_data; // @[FIFOFixer.scala:50:9]
wire buffer_nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_a_bits_corrupt = buffer_auto_out_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire buffer_nodeOut_d_ready; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_d_ready = buffer_auto_out_d_ready; // @[FIFOFixer.scala:50:9]
wire buffer_nodeOut_d_valid = buffer_auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_nodeOut_d_bits_opcode = buffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] buffer_nodeOut_d_bits_param = buffer_auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_nodeOut_d_bits_size = buffer_auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [4:0] buffer_nodeOut_d_bits_source = buffer_auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_sink = buffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_denied = buffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] buffer_nodeOut_d_bits_data = buffer_auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_corrupt = buffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
assign buffer_nodeIn_a_ready = buffer_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_out_a_valid = buffer_nodeOut_a_valid; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_opcode = buffer_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_param = buffer_nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_size = buffer_nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_source = buffer_nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_address = buffer_nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_mask = buffer_nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_data = buffer_nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_corrupt = buffer_nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign buffer_auto_out_d_ready = buffer_nodeOut_d_ready; // @[Buffer.scala:40:9]
assign buffer_nodeIn_d_valid = buffer_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_opcode = buffer_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_param = buffer_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_size = buffer_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_source = buffer_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_sink = buffer_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_denied = buffer_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_data = buffer_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_corrupt = buffer_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_in_a_ready = buffer_nodeIn_a_ready; // @[Buffer.scala:40:9]
assign buffer_nodeOut_a_valid = buffer_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_opcode = buffer_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_param = buffer_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_size = buffer_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_source = buffer_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_address = buffer_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_mask = buffer_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_data = buffer_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_corrupt = buffer_nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_d_ready = buffer_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_in_d_valid = buffer_nodeIn_d_valid; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_opcode = buffer_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_param = buffer_nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_size = buffer_nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_source = buffer_nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_sink = buffer_nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_denied = buffer_nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_data = buffer_nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_corrupt = buffer_nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
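// TLXbar boundary wiring (Xbar.scala:74:9): with a single input (in_0) and a
// single output (out_0) the crossbar degenerates to pass-through routing; the
// portsAOI/portsDIO 'filtered' signals below carry the A and D channels
// straight across without arbitration.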
wire xbar_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire xbar_anonIn_a_valid = xbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonIn_a_bits_opcode = xbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonIn_a_bits_param = xbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonIn_a_bits_size = xbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9]
wire [4:0] xbar_anonIn_a_bits_source = xbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_anonIn_a_bits_address = xbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_anonIn_a_bits_mask = xbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_anonIn_a_bits_data = xbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire xbar_anonIn_a_bits_corrupt = xbar_auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_anonIn_d_ready = xbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire xbar_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] xbar_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] xbar_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] xbar_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire xbar_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] xbar_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire xbar_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire xbar_anonOut_a_ready = xbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire xbar_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] xbar_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] xbar_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] xbar_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] xbar_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] xbar_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire xbar_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire xbar_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire xbar_anonOut_d_valid = xbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonOut_d_bits_opcode = xbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_anonOut_d_bits_size = xbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire [4:0] xbar_anonOut_d_bits_source = xbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire xbar_anonOut_d_bits_denied = xbar_auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] xbar_anonOut_d_bits_data = xbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_anonOut_d_bits_corrupt = xbar_auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_a_ready; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_d_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_in_d_bits_size; // @[Xbar.scala:74:9]
wire [4:0] xbar_auto_anon_in_d_bits_source; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_in_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_auto_anon_in_d_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_auto_anon_out_a_bits_size; // @[Xbar.scala:74:9]
wire [4:0] xbar_auto_anon_out_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_auto_anon_out_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_auto_anon_out_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_auto_anon_out_a_bits_data; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_a_valid; // @[Xbar.scala:74:9]
wire xbar_auto_anon_out_d_ready; // @[Xbar.scala:74:9]
wire xbar_out_0_a_ready = xbar_anonOut_a_ready; // @[Xbar.scala:216:19]
wire xbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_valid = xbar_anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_opcode = xbar_anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_param = xbar_anonOut_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] xbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_size = xbar_anonOut_a_bits_size; // @[Xbar.scala:74:9]
wire [4:0] xbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_source = xbar_anonOut_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] xbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_address = xbar_anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] xbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_mask = xbar_anonOut_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] xbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_data = xbar_anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire xbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_a_bits_corrupt = xbar_anonOut_a_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_out_0_d_ready; // @[Xbar.scala:216:19]
assign xbar_auto_anon_out_d_ready = xbar_anonOut_d_ready; // @[Xbar.scala:74:9]
wire xbar_out_0_d_valid = xbar_anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] xbar_out_0_d_bits_opcode = xbar_anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [2:0] xbar_out_0_d_bits_size = xbar_anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire [4:0] xbar_out_0_d_bits_source = xbar_anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire xbar_out_0_d_bits_denied = xbar_anonOut_d_bits_denied; // @[Xbar.scala:216:19]
wire [63:0] xbar_out_0_d_bits_data = xbar_anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire xbar_out_0_d_bits_corrupt = xbar_anonOut_d_bits_corrupt; // @[Xbar.scala:216:19]
wire xbar_in_0_a_ready; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_a_ready = xbar_anonIn_a_ready; // @[Xbar.scala:74:9]
wire xbar_in_0_a_valid = xbar_anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_a_bits_opcode = xbar_anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_a_bits_param = xbar_anonIn_a_bits_param; // @[Xbar.scala:159:18]
wire [2:0] xbar_in_0_a_bits_size = xbar_anonIn_a_bits_size; // @[Xbar.scala:159:18]
wire [4:0] xbar__in_0_a_bits_source_T = xbar_anonIn_a_bits_source; // @[Xbar.scala:166:55]
wire [31:0] xbar_in_0_a_bits_address = xbar_anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] xbar_in_0_a_bits_mask = xbar_anonIn_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] xbar_in_0_a_bits_data = xbar_anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire xbar_in_0_a_bits_corrupt = xbar_anonIn_a_bits_corrupt; // @[Xbar.scala:159:18]
wire xbar_in_0_d_ready = xbar_anonIn_d_ready; // @[Xbar.scala:159:18]
wire xbar_in_0_d_valid; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_valid = xbar_anonIn_d_valid; // @[Xbar.scala:74:9]
wire [2:0] xbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_opcode = xbar_anonIn_d_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] xbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_size = xbar_anonIn_d_bits_size; // @[Xbar.scala:74:9]
wire [4:0] xbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign xbar_auto_anon_in_d_bits_source = xbar_anonIn_d_bits_source; // @[Xbar.scala:74:9]
wire xbar_in_0_d_bits_denied; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_denied = xbar_anonIn_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] xbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_data = xbar_anonIn_d_bits_data; // @[Xbar.scala:74:9]
wire xbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign xbar_auto_anon_in_d_bits_corrupt = xbar_anonIn_d_bits_corrupt; // @[Xbar.scala:74:9]
wire xbar_portsAOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign xbar_anonIn_a_ready = xbar_in_0_a_ready; // @[Xbar.scala:159:18]
wire xbar__portsAOI_filtered_0_valid_T_1 = xbar_in_0_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] xbar_portsAOI_filtered_0_bits_opcode = xbar_in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] xbar_portsAOI_filtered_0_bits_param = xbar_in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [2:0] xbar_portsAOI_filtered_0_bits_size = xbar_in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [4:0] xbar_portsAOI_filtered_0_bits_source = xbar_in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] xbar__requestAIO_T = xbar_in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] xbar_portsAOI_filtered_0_bits_address = xbar_in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] xbar_portsAOI_filtered_0_bits_mask = xbar_in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] xbar_portsAOI_filtered_0_bits_data = xbar_in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsAOI_filtered_0_bits_corrupt = xbar_in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsDIO_filtered_0_ready = xbar_in_0_d_ready; // @[Xbar.scala:159:18, :352:24]
wire xbar_portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_valid = xbar_in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] xbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_opcode = xbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] xbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_size = xbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
wire [4:0] xbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
assign xbar__anonIn_d_bits_source_T = xbar_in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18]
wire xbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_denied = xbar_in_0_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] xbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_data = xbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
wire xbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign xbar_anonIn_d_bits_corrupt = xbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign xbar_in_0_a_bits_source = xbar__in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55]
assign xbar_anonIn_d_bits_source = xbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign xbar_portsAOI_filtered_0_ready = xbar_out_0_a_ready; // @[Xbar.scala:216:19, :352:24]
wire xbar_portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign xbar_anonOut_a_valid = xbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_opcode = xbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_param = xbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_size = xbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_source = xbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_address = xbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_mask = xbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_data = xbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign xbar_anonOut_a_bits_corrupt = xbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign xbar_anonOut_d_ready = xbar_out_0_d_ready; // @[Xbar.scala:216:19]
wire xbar__portsDIO_filtered_0_valid_T_1 = xbar_out_0_d_valid; // @[Xbar.scala:216:19, :355:40]
assign xbar_portsDIO_filtered_0_bits_opcode = xbar_out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_size = xbar_out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
wire [4:0] xbar__requestDOI_uncommonBits_T = xbar_out_0_d_bits_source; // @[Xbar.scala:216:19]
assign xbar_portsDIO_filtered_0_bits_source = xbar_out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_denied = xbar_out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_data = xbar_out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsDIO_filtered_0_bits_corrupt = xbar_out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
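// Crossbar bookkeeping: requestAIO/requestDOI are the (trivial, single-port)
// address and source decodes, and beatsAI_0/beatsDO_0 give the beat count of
// the current A request and D response, zero when the opcode carries no data.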
wire [32:0] xbar__requestAIO_T_1 = {1'h0, xbar__requestAIO_T}; // @[Parameters.scala:137:{31,41}]
wire [4:0] xbar_requestDOI_uncommonBits = xbar__requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [12:0] xbar__beatsAI_decode_T = 13'h3F << xbar_in_0_a_bits_size; // @[package.scala:243:71]
wire [5:0] xbar__beatsAI_decode_T_1 = xbar__beatsAI_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] xbar__beatsAI_decode_T_2 = ~xbar__beatsAI_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] xbar_beatsAI_decode = xbar__beatsAI_decode_T_2[5:3]; // @[package.scala:243:46]
wire xbar__beatsAI_opdata_T = xbar_in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire xbar_beatsAI_opdata = ~xbar__beatsAI_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] xbar_beatsAI_0 = xbar_beatsAI_opdata ? xbar_beatsAI_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
wire [12:0] xbar__beatsDO_decode_T = 13'h3F << xbar_out_0_d_bits_size; // @[package.scala:243:71]
wire [5:0] xbar__beatsDO_decode_T_1 = xbar__beatsDO_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] xbar__beatsDO_decode_T_2 = ~xbar__beatsDO_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] xbar_beatsDO_decode = xbar__beatsDO_decode_T_2[5:3]; // @[package.scala:243:46]
wire xbar_beatsDO_opdata = xbar_out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire [2:0] xbar_beatsDO_0 = xbar_beatsDO_opdata ? xbar_beatsDO_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
assign xbar_in_0_a_ready = xbar_portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign xbar_out_0_a_valid = xbar_portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_opcode = xbar_portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_param = xbar_portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_size = xbar_portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_source = xbar_portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_address = xbar_portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_mask = xbar_portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_data = xbar_portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign xbar_out_0_a_bits_corrupt = xbar_portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign xbar_portsAOI_filtered_0_valid = xbar__portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign xbar_out_0_d_ready = xbar_portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
assign xbar_in_0_d_valid = xbar_portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_opcode = xbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_size = xbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_source = xbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_denied = xbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_data = xbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign xbar_in_0_d_bits_corrupt = xbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
assign xbar_portsDIO_filtered_0_valid = xbar__portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
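// Coupler to the mbus scratchpad (coupler_to_mbusscratchpad00): tlIn/tlOut are
// direct pass-throughs of the 28-bit-addressed TileLink A and D channels; no
// conversion is performed at this boundary.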
wire coupler_to_mbusscratchpad00_tlIn_a_ready; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_a_valid = coupler_to_mbusscratchpad00_auto_tl_in_a_valid; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_mbusscratchpad00_tlIn_a_bits_opcode = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_mbusscratchpad00_tlIn_a_bits_param = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_mbusscratchpad00_tlIn_a_bits_size = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] coupler_to_mbusscratchpad00_tlIn_a_bits_source = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_source; // @[MixedNode.scala:551:17]
wire [27:0] coupler_to_mbusscratchpad00_tlIn_a_bits_address = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] coupler_to_mbusscratchpad00_tlIn_a_bits_mask = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] coupler_to_mbusscratchpad00_tlIn_a_bits_data = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_data; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_a_bits_corrupt = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_d_ready = coupler_to_mbusscratchpad00_auto_tl_in_d_ready; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_mbusscratchpad00_tlIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] coupler_to_mbusscratchpad00_tlIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_mbusscratchpad00_tlIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] coupler_to_mbusscratchpad00_tlIn_d_bits_source; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] coupler_to_mbusscratchpad00_tlIn_d_bits_data; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_to_mbusscratchpad00_tlOut_a_ready = coupler_to_mbusscratchpad00_auto_tl_out_a_ready; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_mbusscratchpad00_tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_mbusscratchpad00_tlOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_mbusscratchpad00_tlOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] coupler_to_mbusscratchpad00_tlOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [27:0] coupler_to_mbusscratchpad00_tlOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] coupler_to_mbusscratchpad00_tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] coupler_to_mbusscratchpad00_tlOut_a_bits_data; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_d_ready; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_d_valid = coupler_to_mbusscratchpad00_auto_tl_out_d_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_mbusscratchpad00_tlOut_d_bits_opcode = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_opcode; // @[MixedNode.scala:542:17]
wire [1:0] coupler_to_mbusscratchpad00_tlOut_d_bits_param = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_mbusscratchpad00_tlOut_d_bits_size = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] coupler_to_mbusscratchpad00_tlOut_d_bits_source = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_source; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_d_bits_sink = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_sink; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_d_bits_denied = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_denied; // @[MixedNode.scala:542:17]
wire [63:0] coupler_to_mbusscratchpad00_tlOut_d_bits_data = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_data; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_tlOut_d_bits_corrupt = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_to_mbusscratchpad00_auto_tl_in_a_ready; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_in_d_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [27:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [7:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_a_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_mbusscratchpad00_auto_tl_out_d_ready; // @[LazyModuleImp.scala:138:7]
assign coupler_to_mbusscratchpad00_tlIn_a_ready = coupler_to_mbusscratchpad00_tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_valid = coupler_to_mbusscratchpad00_tlOut_a_valid; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_opcode = coupler_to_mbusscratchpad00_tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_param = coupler_to_mbusscratchpad00_tlOut_a_bits_param; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_size = coupler_to_mbusscratchpad00_tlOut_a_bits_size; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_source = coupler_to_mbusscratchpad00_tlOut_a_bits_source; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_address = coupler_to_mbusscratchpad00_tlOut_a_bits_address; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_mask = coupler_to_mbusscratchpad00_tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_data = coupler_to_mbusscratchpad00_tlOut_a_bits_data; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_corrupt = coupler_to_mbusscratchpad00_tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_auto_tl_out_d_ready = coupler_to_mbusscratchpad00_tlOut_d_ready; // @[MixedNode.scala:542:17]
assign coupler_to_mbusscratchpad00_tlIn_d_valid = coupler_to_mbusscratchpad00_tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_opcode = coupler_to_mbusscratchpad00_tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_param = coupler_to_mbusscratchpad00_tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_size = coupler_to_mbusscratchpad00_tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_source = coupler_to_mbusscratchpad00_tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_sink = coupler_to_mbusscratchpad00_tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_denied = coupler_to_mbusscratchpad00_tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_data = coupler_to_mbusscratchpad00_tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlIn_d_bits_corrupt = coupler_to_mbusscratchpad00_tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_a_ready = coupler_to_mbusscratchpad00_tlIn_a_ready; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_valid = coupler_to_mbusscratchpad00_tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_opcode = coupler_to_mbusscratchpad00_tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_param = coupler_to_mbusscratchpad00_tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_size = coupler_to_mbusscratchpad00_tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_source = coupler_to_mbusscratchpad00_tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_address = coupler_to_mbusscratchpad00_tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_mask = coupler_to_mbusscratchpad00_tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_data = coupler_to_mbusscratchpad00_tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_a_bits_corrupt = coupler_to_mbusscratchpad00_tlIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_tlOut_d_ready = coupler_to_mbusscratchpad00_tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_valid = coupler_to_mbusscratchpad00_tlIn_d_valid; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_opcode = coupler_to_mbusscratchpad00_tlIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_param = coupler_to_mbusscratchpad00_tlIn_d_bits_param; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_size = coupler_to_mbusscratchpad00_tlIn_d_bits_size; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_source = coupler_to_mbusscratchpad00_tlIn_d_bits_source; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_sink = coupler_to_mbusscratchpad00_tlIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_denied = coupler_to_mbusscratchpad00_tlIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_data = coupler_to_mbusscratchpad00_tlIn_d_bits_data; // @[MixedNode.scala:551:17]
assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_corrupt = coupler_to_mbusscratchpad00_tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign childClock = clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
assign childReset = clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
assign bus_xingIn_a_ready = bus_xingOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_in_a_valid = bus_xingOut_a_valid; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_opcode = bus_xingOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_param = bus_xingOut_a_bits_param; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_size = bus_xingOut_a_bits_size; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_source = bus_xingOut_a_bits_source; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_address = bus_xingOut_a_bits_address; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_mask = bus_xingOut_a_bits_mask; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_data = bus_xingOut_a_bits_data; // @[Buffer.scala:40:9]
assign buffer_auto_in_a_bits_corrupt = bus_xingOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_ready = bus_xingOut_d_ready; // @[Buffer.scala:40:9]
assign bus_xingIn_d_valid = bus_xingOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_opcode = bus_xingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_param = bus_xingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_size = bus_xingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_source = bus_xingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_sink = bus_xingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_denied = bus_xingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_data = bus_xingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingIn_d_bits_corrupt = bus_xingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign auto_bus_xing_in_a_ready_0 = bus_xingIn_a_ready; // @[ClockDomain.scala:14:9]
assign bus_xingOut_a_valid = bus_xingIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_opcode = bus_xingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_param = bus_xingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_size = bus_xingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_source = bus_xingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_address = bus_xingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_mask = bus_xingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_data = bus_xingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_a_bits_corrupt = bus_xingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign bus_xingOut_d_ready = bus_xingIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_bus_xing_in_d_valid_0 = bus_xingIn_d_valid; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_opcode_0 = bus_xingIn_d_bits_opcode; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_param_0 = bus_xingIn_d_bits_param; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_size_0 = bus_xingIn_d_bits_size; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_source_0 = bus_xingIn_d_bits_source; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_sink_0 = bus_xingIn_d_bits_sink; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_denied_0 = bus_xingIn_d_bits_denied; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_data_0 = bus_xingIn_d_bits_data; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_corrupt_0 = bus_xingIn_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire fixer__T_1 = fixer_a_first & fixer__a_first_T; // @[Decoupled.scala:51:35]
wire fixer__T_3 = fixer_d_first & fixer__T_2; // @[Decoupled.scala:51:35]
always @(posedge childClock) begin // @[LazyModuleImp.scala:155:31]
if (childReset) begin // @[LazyModuleImp.scala:155:31, :158:31]
fixer_a_first_counter <= 3'h0; // @[Edges.scala:229:27]
fixer_d_first_counter <= 3'h0; // @[Edges.scala:229:27]
fixer_flight_0 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_1 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_2 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_3 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_4 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_5 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_6 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_7 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_8 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_9 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_10 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_11 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_12 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_13 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_14 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_15 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_16 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_17 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_18 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_19 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_SourceIdFIFOed <= 20'h0; // @[FIFOFixer.scala:115:35]
end
else begin // @[LazyModuleImp.scala:155:31]
if (fixer__a_first_T) // @[Decoupled.scala:51:35]
fixer_a_first_counter <= fixer__a_first_counter_T; // @[Edges.scala:229:27, :236:21]
if (fixer__d_first_T) // @[Decoupled.scala:51:35]
fixer_d_first_counter <= fixer__d_first_counter_T; // @[Edges.scala:229:27, :236:21]
fixer_flight_0 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h0) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h0 | fixer_flight_0); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_1 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h1) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h1 | fixer_flight_1); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_2 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h2) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h2 | fixer_flight_2); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_3 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h3) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h3 | fixer_flight_3); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_4 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h4) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h4 | fixer_flight_4); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_5 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h5) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h5 | fixer_flight_5); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_6 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h6) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h6 | fixer_flight_6); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_7 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h7) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h7 | fixer_flight_7); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_8 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h8) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h8 | fixer_flight_8); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_9 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h9) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h9 | fixer_flight_9); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_10 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hA) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hA | fixer_flight_10); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_11 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hB) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hB | fixer_flight_11); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_12 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hC) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hC | fixer_flight_12); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_13 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hD) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hD | fixer_flight_13); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_14 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hE) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hE | fixer_flight_14); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_15 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hF) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hF | fixer_flight_15); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_16 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h10) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h10 | fixer_flight_16); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_17 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h11) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h11 | fixer_flight_17); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_18 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h12) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h12 | fixer_flight_18); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_19 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h13) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h13 | fixer_flight_19); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_SourceIdFIFOed <= fixer__SourceIdFIFOed_T; // @[FIFOFixer.scala:115:35, :126:40]
end
  end // always @(posedge)
FixedClockBroadcast_3 fixedClockNode ( // @[ClockGroup.scala:115:114]
.auto_anon_in_clock (clockGroup_auto_out_clock), // @[ClockGroup.scala:24:9]
.auto_anon_in_reset (clockGroup_auto_out_reset), // @[ClockGroup.scala:24:9]
.auto_anon_out_2_clock (auto_fixedClockNode_anon_out_1_clock_0),
.auto_anon_out_2_reset (auto_fixedClockNode_anon_out_1_reset_0),
.auto_anon_out_1_clock (auto_fixedClockNode_anon_out_0_clock_0),
.auto_anon_out_1_reset (auto_fixedClockNode_anon_out_0_reset_0),
.auto_anon_out_0_clock (clockSinkNodeIn_clock),
.auto_anon_out_0_reset (clockSinkNodeIn_reset)
); // @[ClockGroup.scala:115:114]
TLXbar_mbus_i1_o2_a32d64s5k1z3u mbus_xbar ( // @[MemoryBus.scala:47:32]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_anon_in_a_ready (fixer_auto_anon_out_a_ready),
.auto_anon_in_a_valid (fixer_auto_anon_out_a_valid), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_opcode (fixer_auto_anon_out_a_bits_opcode), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_param (fixer_auto_anon_out_a_bits_param), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_size (fixer_auto_anon_out_a_bits_size), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_source (fixer_auto_anon_out_a_bits_source), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_address (fixer_auto_anon_out_a_bits_address), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_mask (fixer_auto_anon_out_a_bits_mask), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_data (fixer_auto_anon_out_a_bits_data), // @[FIFOFixer.scala:50:9]
.auto_anon_in_a_bits_corrupt (fixer_auto_anon_out_a_bits_corrupt), // @[FIFOFixer.scala:50:9]
.auto_anon_in_d_ready (fixer_auto_anon_out_d_ready), // @[FIFOFixer.scala:50:9]
.auto_anon_in_d_valid (fixer_auto_anon_out_d_valid),
.auto_anon_in_d_bits_opcode (fixer_auto_anon_out_d_bits_opcode),
.auto_anon_in_d_bits_param (fixer_auto_anon_out_d_bits_param),
.auto_anon_in_d_bits_size (fixer_auto_anon_out_d_bits_size),
.auto_anon_in_d_bits_source (fixer_auto_anon_out_d_bits_source),
.auto_anon_in_d_bits_sink (fixer_auto_anon_out_d_bits_sink),
.auto_anon_in_d_bits_denied (fixer_auto_anon_out_d_bits_denied),
.auto_anon_in_d_bits_data (fixer_auto_anon_out_d_bits_data),
.auto_anon_in_d_bits_corrupt (fixer_auto_anon_out_d_bits_corrupt),
.auto_anon_out_1_a_ready (_picker_auto_in_1_a_ready), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_a_valid (_mbus_xbar_auto_anon_out_1_a_valid),
.auto_anon_out_1_a_bits_opcode (_mbus_xbar_auto_anon_out_1_a_bits_opcode),
.auto_anon_out_1_a_bits_param (_mbus_xbar_auto_anon_out_1_a_bits_param),
.auto_anon_out_1_a_bits_size (_mbus_xbar_auto_anon_out_1_a_bits_size),
.auto_anon_out_1_a_bits_source (_mbus_xbar_auto_anon_out_1_a_bits_source),
.auto_anon_out_1_a_bits_address (_mbus_xbar_auto_anon_out_1_a_bits_address),
.auto_anon_out_1_a_bits_mask (_mbus_xbar_auto_anon_out_1_a_bits_mask),
.auto_anon_out_1_a_bits_data (_mbus_xbar_auto_anon_out_1_a_bits_data),
.auto_anon_out_1_a_bits_corrupt (_mbus_xbar_auto_anon_out_1_a_bits_corrupt),
.auto_anon_out_1_d_ready (_mbus_xbar_auto_anon_out_1_d_ready),
.auto_anon_out_1_d_valid (_picker_auto_in_1_d_valid), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_opcode (_picker_auto_in_1_d_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_param (_picker_auto_in_1_d_bits_param), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_size (_picker_auto_in_1_d_bits_size), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_source (_picker_auto_in_1_d_bits_source), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_sink (_picker_auto_in_1_d_bits_sink), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_denied (_picker_auto_in_1_d_bits_denied), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_data (_picker_auto_in_1_d_bits_data), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_corrupt (_picker_auto_in_1_d_bits_corrupt), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_a_ready (_picker_auto_in_0_a_ready), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_a_valid (_mbus_xbar_auto_anon_out_0_a_valid),
.auto_anon_out_0_a_bits_opcode (_mbus_xbar_auto_anon_out_0_a_bits_opcode),
.auto_anon_out_0_a_bits_param (_mbus_xbar_auto_anon_out_0_a_bits_param),
.auto_anon_out_0_a_bits_size (_mbus_xbar_auto_anon_out_0_a_bits_size),
.auto_anon_out_0_a_bits_source (_mbus_xbar_auto_anon_out_0_a_bits_source),
.auto_anon_out_0_a_bits_address (_mbus_xbar_auto_anon_out_0_a_bits_address),
.auto_anon_out_0_a_bits_mask (_mbus_xbar_auto_anon_out_0_a_bits_mask),
.auto_anon_out_0_a_bits_data (_mbus_xbar_auto_anon_out_0_a_bits_data),
.auto_anon_out_0_a_bits_corrupt (_mbus_xbar_auto_anon_out_0_a_bits_corrupt),
.auto_anon_out_0_d_ready (_mbus_xbar_auto_anon_out_0_d_ready),
.auto_anon_out_0_d_valid (_picker_auto_in_0_d_valid), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_opcode (_picker_auto_in_0_d_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_size (_picker_auto_in_0_d_bits_size), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_source (_picker_auto_in_0_d_bits_source), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_denied (_picker_auto_in_0_d_bits_denied), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_data (_picker_auto_in_0_d_bits_data), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_corrupt (_picker_auto_in_0_d_bits_corrupt) // @[ProbePicker.scala:69:28]
); // @[MemoryBus.scala:47:32]
ProbePicker picker ( // @[ProbePicker.scala:69:28]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_1_a_ready (_picker_auto_in_1_a_ready),
.auto_in_1_a_valid (_mbus_xbar_auto_anon_out_1_a_valid), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_opcode (_mbus_xbar_auto_anon_out_1_a_bits_opcode), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_param (_mbus_xbar_auto_anon_out_1_a_bits_param), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_size (_mbus_xbar_auto_anon_out_1_a_bits_size), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_source (_mbus_xbar_auto_anon_out_1_a_bits_source), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_address (_mbus_xbar_auto_anon_out_1_a_bits_address), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_mask (_mbus_xbar_auto_anon_out_1_a_bits_mask), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_data (_mbus_xbar_auto_anon_out_1_a_bits_data), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_corrupt (_mbus_xbar_auto_anon_out_1_a_bits_corrupt), // @[MemoryBus.scala:47:32]
.auto_in_1_d_ready (_mbus_xbar_auto_anon_out_1_d_ready), // @[MemoryBus.scala:47:32]
.auto_in_1_d_valid (_picker_auto_in_1_d_valid),
.auto_in_1_d_bits_opcode (_picker_auto_in_1_d_bits_opcode),
.auto_in_1_d_bits_param (_picker_auto_in_1_d_bits_param),
.auto_in_1_d_bits_size (_picker_auto_in_1_d_bits_size),
.auto_in_1_d_bits_source (_picker_auto_in_1_d_bits_source),
.auto_in_1_d_bits_sink (_picker_auto_in_1_d_bits_sink),
.auto_in_1_d_bits_denied (_picker_auto_in_1_d_bits_denied),
.auto_in_1_d_bits_data (_picker_auto_in_1_d_bits_data),
.auto_in_1_d_bits_corrupt (_picker_auto_in_1_d_bits_corrupt),
.auto_in_0_a_ready (_picker_auto_in_0_a_ready),
.auto_in_0_a_valid (_mbus_xbar_auto_anon_out_0_a_valid), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_opcode (_mbus_xbar_auto_anon_out_0_a_bits_opcode), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_param (_mbus_xbar_auto_anon_out_0_a_bits_param), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_size (_mbus_xbar_auto_anon_out_0_a_bits_size), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_source (_mbus_xbar_auto_anon_out_0_a_bits_source), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_address (_mbus_xbar_auto_anon_out_0_a_bits_address), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_mask (_mbus_xbar_auto_anon_out_0_a_bits_mask), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_data (_mbus_xbar_auto_anon_out_0_a_bits_data), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_corrupt (_mbus_xbar_auto_anon_out_0_a_bits_corrupt), // @[MemoryBus.scala:47:32]
.auto_in_0_d_ready (_mbus_xbar_auto_anon_out_0_d_ready), // @[MemoryBus.scala:47:32]
.auto_in_0_d_valid (_picker_auto_in_0_d_valid),
.auto_in_0_d_bits_opcode (_picker_auto_in_0_d_bits_opcode),
.auto_in_0_d_bits_size (_picker_auto_in_0_d_bits_size),
.auto_in_0_d_bits_source (_picker_auto_in_0_d_bits_source),
.auto_in_0_d_bits_denied (_picker_auto_in_0_d_bits_denied),
.auto_in_0_d_bits_data (_picker_auto_in_0_d_bits_data),
.auto_in_0_d_bits_corrupt (_picker_auto_in_0_d_bits_corrupt),
.auto_out_1_a_ready (coupler_to_mbusscratchpad00_auto_tl_in_a_ready), // @[LazyModuleImp.scala:138:7]
.auto_out_1_a_valid (coupler_to_mbusscratchpad00_auto_tl_in_a_valid),
.auto_out_1_a_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_opcode),
.auto_out_1_a_bits_param (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_param),
.auto_out_1_a_bits_size (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_size),
.auto_out_1_a_bits_source (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_source),
.auto_out_1_a_bits_address (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_address),
.auto_out_1_a_bits_mask (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_mask),
.auto_out_1_a_bits_data (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_data),
.auto_out_1_a_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_corrupt),
.auto_out_1_d_ready (coupler_to_mbusscratchpad00_auto_tl_in_d_ready),
.auto_out_1_d_valid (coupler_to_mbusscratchpad00_auto_tl_in_d_valid), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_opcode), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_param (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_param), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_size (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_size), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_source (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_source), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_sink (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_sink), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_denied (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_denied), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_data (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_data), // @[LazyModuleImp.scala:138:7]
.auto_out_1_d_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_corrupt), // @[LazyModuleImp.scala:138:7]
.auto_out_0_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready), // @[LazyScope.scala:98:27]
.auto_out_0_a_valid (_picker_auto_out_0_a_valid),
.auto_out_0_a_bits_opcode (_picker_auto_out_0_a_bits_opcode),
.auto_out_0_a_bits_param (_picker_auto_out_0_a_bits_param),
.auto_out_0_a_bits_size (_picker_auto_out_0_a_bits_size),
.auto_out_0_a_bits_source (_picker_auto_out_0_a_bits_source),
.auto_out_0_a_bits_address (_picker_auto_out_0_a_bits_address),
.auto_out_0_a_bits_mask (_picker_auto_out_0_a_bits_mask),
.auto_out_0_a_bits_data (_picker_auto_out_0_a_bits_data),
.auto_out_0_a_bits_corrupt (_picker_auto_out_0_a_bits_corrupt),
.auto_out_0_d_ready (_picker_auto_out_0_d_ready),
.auto_out_0_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt) // @[LazyScope.scala:98:27]
); // @[ProbePicker.scala:69:28]
TLInterconnectCoupler_mbus_to_memory_controller_port_named_tl_mem coupler_to_memory_controller_port_named_tl_mem ( // @[LazyScope.scala:98:27]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[LazyScope.scala:98:27]
TLInterconnectCoupler_mbus_to_memory_controller_port_named_axi4 coupler_to_memory_controller_port_named_axi4 ( // @[LazyScope.scala:98:27]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_widget_anon_in_a_ready (xbar_auto_anon_out_a_ready),
.auto_widget_anon_in_a_valid (xbar_auto_anon_out_a_valid), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_opcode (xbar_auto_anon_out_a_bits_opcode), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_param (xbar_auto_anon_out_a_bits_param), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_size (xbar_auto_anon_out_a_bits_size), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_source (xbar_auto_anon_out_a_bits_source), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_address (xbar_auto_anon_out_a_bits_address), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_mask (xbar_auto_anon_out_a_bits_mask), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_data (xbar_auto_anon_out_a_bits_data), // @[Xbar.scala:74:9]
.auto_widget_anon_in_a_bits_corrupt (xbar_auto_anon_out_a_bits_corrupt), // @[Xbar.scala:74:9]
.auto_widget_anon_in_d_ready (xbar_auto_anon_out_d_ready), // @[Xbar.scala:74:9]
.auto_widget_anon_in_d_valid (xbar_auto_anon_out_d_valid),
.auto_widget_anon_in_d_bits_opcode (xbar_auto_anon_out_d_bits_opcode),
.auto_widget_anon_in_d_bits_size (xbar_auto_anon_out_d_bits_size),
.auto_widget_anon_in_d_bits_source (xbar_auto_anon_out_d_bits_source),
.auto_widget_anon_in_d_bits_denied (xbar_auto_anon_out_d_bits_denied),
.auto_widget_anon_in_d_bits_data (xbar_auto_anon_out_d_bits_data),
.auto_widget_anon_in_d_bits_corrupt (xbar_auto_anon_out_d_bits_corrupt),
.auto_axi4yank_out_aw_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_aw_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid_0),
.auto_axi4yank_out_aw_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id_0),
.auto_axi4yank_out_aw_bits_addr (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr_0),
.auto_axi4yank_out_aw_bits_len (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len_0),
.auto_axi4yank_out_aw_bits_size (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size_0),
.auto_axi4yank_out_aw_bits_burst (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst_0),
.auto_axi4yank_out_aw_bits_lock (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock_0),
.auto_axi4yank_out_aw_bits_cache (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache_0),
.auto_axi4yank_out_aw_bits_prot (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot_0),
.auto_axi4yank_out_aw_bits_qos (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos_0),
.auto_axi4yank_out_w_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_w_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid_0),
.auto_axi4yank_out_w_bits_data (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data_0),
.auto_axi4yank_out_w_bits_strb (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb_0),
.auto_axi4yank_out_w_bits_last (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last_0),
.auto_axi4yank_out_b_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready_0),
.auto_axi4yank_out_b_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_b_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_b_bits_resp (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_ar_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_ar_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid_0),
.auto_axi4yank_out_ar_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id_0),
.auto_axi4yank_out_ar_bits_addr (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr_0),
.auto_axi4yank_out_ar_bits_len (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len_0),
.auto_axi4yank_out_ar_bits_size (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size_0),
.auto_axi4yank_out_ar_bits_burst (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst_0),
.auto_axi4yank_out_ar_bits_lock (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock_0),
.auto_axi4yank_out_ar_bits_cache (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache_0),
.auto_axi4yank_out_ar_bits_prot (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot_0),
.auto_axi4yank_out_ar_bits_qos (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos_0),
.auto_axi4yank_out_r_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready_0),
.auto_axi4yank_out_r_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_r_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_r_bits_data (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_r_bits_resp (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp_0), // @[ClockDomain.scala:14:9]
.auto_axi4yank_out_r_bits_last (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last_0), // @[ClockDomain.scala:14:9]
.auto_tl_in_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready),
.auto_tl_in_a_valid (_picker_auto_out_0_a_valid), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_opcode (_picker_auto_out_0_a_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_param (_picker_auto_out_0_a_bits_param), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_size (_picker_auto_out_0_a_bits_size), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_source (_picker_auto_out_0_a_bits_source), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_address (_picker_auto_out_0_a_bits_address), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_mask (_picker_auto_out_0_a_bits_mask), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_data (_picker_auto_out_0_a_bits_data), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_corrupt (_picker_auto_out_0_a_bits_corrupt), // @[ProbePicker.scala:69:28]
.auto_tl_in_d_ready (_picker_auto_out_0_d_ready), // @[ProbePicker.scala:69:28]
.auto_tl_in_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid),
.auto_tl_in_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode),
.auto_tl_in_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size),
.auto_tl_in_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source),
.auto_tl_in_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied),
.auto_tl_in_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data),
.auto_tl_in_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt),
.auto_tl_out_a_ready (xbar_auto_anon_in_a_ready), // @[Xbar.scala:74:9]
.auto_tl_out_a_valid (xbar_auto_anon_in_a_valid),
.auto_tl_out_a_bits_opcode (xbar_auto_anon_in_a_bits_opcode),
.auto_tl_out_a_bits_param (xbar_auto_anon_in_a_bits_param),
.auto_tl_out_a_bits_size (xbar_auto_anon_in_a_bits_size),
.auto_tl_out_a_bits_source (xbar_auto_anon_in_a_bits_source),
.auto_tl_out_a_bits_address (xbar_auto_anon_in_a_bits_address),
.auto_tl_out_a_bits_mask (xbar_auto_anon_in_a_bits_mask),
.auto_tl_out_a_bits_data (xbar_auto_anon_in_a_bits_data),
.auto_tl_out_a_bits_corrupt (xbar_auto_anon_in_a_bits_corrupt),
.auto_tl_out_d_ready (xbar_auto_anon_in_d_ready),
.auto_tl_out_d_valid (xbar_auto_anon_in_d_valid), // @[Xbar.scala:74:9]
.auto_tl_out_d_bits_opcode (xbar_auto_anon_in_d_bits_opcode), // @[Xbar.scala:74:9]
.auto_tl_out_d_bits_size (xbar_auto_anon_in_d_bits_size), // @[Xbar.scala:74:9]
.auto_tl_out_d_bits_source (xbar_auto_anon_in_d_bits_source), // @[Xbar.scala:74:9]
.auto_tl_out_d_bits_denied (xbar_auto_anon_in_d_bits_denied), // @[Xbar.scala:74:9]
.auto_tl_out_d_bits_data (xbar_auto_anon_in_d_bits_data), // @[Xbar.scala:74:9]
.auto_tl_out_d_bits_corrupt (xbar_auto_anon_in_d_bits_corrupt) // @[Xbar.scala:74:9]
); // @[LazyScope.scala:98:27]
TLBuffer_a28d64s5k1z3u buffer_1 ( // @[Buffer.scala:75:28]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_a_ready (coupler_to_mbusscratchpad00_auto_tl_out_a_ready),
.auto_in_a_valid (coupler_to_mbusscratchpad00_auto_tl_out_a_valid), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_opcode), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_param (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_param), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_size (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_size), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_source (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_source), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_address (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_address), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_mask (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_mask), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_data (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_data), // @[LazyModuleImp.scala:138:7]
.auto_in_a_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_corrupt), // @[LazyModuleImp.scala:138:7]
.auto_in_d_ready (coupler_to_mbusscratchpad00_auto_tl_out_d_ready), // @[LazyModuleImp.scala:138:7]
.auto_in_d_valid (coupler_to_mbusscratchpad00_auto_tl_out_d_valid),
.auto_in_d_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_opcode),
.auto_in_d_bits_param (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_param),
.auto_in_d_bits_size (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_size),
.auto_in_d_bits_source (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_source),
.auto_in_d_bits_sink (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_sink),
.auto_in_d_bits_denied (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_denied),
.auto_in_d_bits_data (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_data),
.auto_in_d_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_corrupt),
.auto_out_a_ready (auto_buffer_out_a_ready_0), // @[ClockDomain.scala:14:9]
.auto_out_a_valid (auto_buffer_out_a_valid_0),
.auto_out_a_bits_opcode (auto_buffer_out_a_bits_opcode_0),
.auto_out_a_bits_param (auto_buffer_out_a_bits_param_0),
.auto_out_a_bits_size (auto_buffer_out_a_bits_size_0),
.auto_out_a_bits_source (auto_buffer_out_a_bits_source_0),
.auto_out_a_bits_address (auto_buffer_out_a_bits_address_0),
.auto_out_a_bits_mask (auto_buffer_out_a_bits_mask_0),
.auto_out_a_bits_data (auto_buffer_out_a_bits_data_0),
.auto_out_a_bits_corrupt (auto_buffer_out_a_bits_corrupt_0),
.auto_out_d_ready (auto_buffer_out_d_ready_0),
.auto_out_d_valid (auto_buffer_out_d_valid_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_opcode (auto_buffer_out_d_bits_opcode_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_param (auto_buffer_out_d_bits_param_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_size (auto_buffer_out_d_bits_size_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_source (auto_buffer_out_d_bits_source_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_sink (auto_buffer_out_d_bits_sink_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_denied (auto_buffer_out_d_bits_denied_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_data (auto_buffer_out_d_bits_data_0), // @[ClockDomain.scala:14:9]
.auto_out_d_bits_corrupt (auto_buffer_out_d_bits_corrupt_0) // @[ClockDomain.scala:14:9]
); // @[Buffer.scala:75:28]
assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_a_bits_corrupt = auto_buffer_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_1_clock = auto_fixedClockNode_anon_out_1_clock_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_1_reset = auto_fixedClockNode_anon_out_1_reset_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_0_clock = auto_fixedClockNode_anon_out_0_clock_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_0_reset = auto_fixedClockNode_anon_out_0_reset_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_a_ready = auto_bus_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_valid = auto_bus_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_opcode = auto_bus_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_param = auto_bus_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_size = auto_bus_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_source = auto_bus_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_sink = auto_bus_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_denied = auto_bus_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_data = auto_bus_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_bus_xing_in_d_bits_corrupt = auto_bus_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
endmodule
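The fixer_flight_* updates in the always block above implement a simple per-source in-flight tracker: a bit is set on the first accepted beat of an A-channel request and cleared on the first accepted beat of the matching D-channel response. A minimal Chisel sketch of that pattern (illustrative only; the module, port, and parameter names below are assumptions, not the actual FIFOFixer source):

import chisel3._
import chisel3.util._

// Sketch of per-source in-flight tracking, assuming 'numSources' request
// sources and externally supplied first-beat fire strobes.
class InflightTracker(numSources: Int) extends Module {
  val io = IO(new Bundle {
    val aFire   = Input(Bool())                       // first A-channel beat accepted
    val aSource = Input(UInt(log2Ceil(numSources).W))
    val dFire   = Input(Bool())                       // first D-channel beat accepted
    val dSource = Input(UInt(log2Ceil(numSources).W))
    val flight  = Output(Vec(numSources, Bool()))
  })
  val flight = RegInit(VecInit(Seq.fill(numSources)(false.B)))
  for (i <- 0 until numSources) {
    val set = io.aFire && io.aSource === i.U
    val clr = io.dFire && io.dSource === i.U
    // Same form as the generated code above: ~clear & (set | held)
    flight(i) := !clr && (set || flight(i))
  }
  io.flight := flight
}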
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
module RoundRawFNToRecFN_e5_s11_15( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_infiniteExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [6:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [13:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [16:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_infiniteExc_0 = io_infiniteExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [6:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [13:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [16:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
RoundAnyRawFNToRecFN_ie5_is13_oe5_os11_15 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_infiniteExc (io_infiniteExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_roundingMode (io_roundingMode_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
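// Illustrative usage sketch (not part of the original Crossing.scala; it assumes a
// diplomatic interrupt source node `intnode` is in scope) showing the typical
// source/sink pairing of the crossing helpers above:
//
//   val source = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false))
//   val sink   = LazyModule(new IntSyncAsyncCrossingSink(sync = 3))
//   source.node := intnode
//   sink.node   := source.node
//
// The source registers the interrupt in the sending clock domain; the sink then
// re-synchronizes it through a 3-deep synchronizer in the receiving domain.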
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
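// A minimal sketch of how LazyRawModuleImp is typically used (illustrative only;
// `io_clock` and `io_reset` are hypothetical ports, not defined in this file):
//
//   class MyClockDomain(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyRawModuleImp(this) {
//       override def provideImplicitClockToLazyChildren = true
//       val io_clock = IO(Input(Clock()))
//       val io_reset = IO(Input(Reset()))
//       childClock := io_clock
//       childReset := io_reset
//     }
//   }
//
// Driving childClock/childReset ensures that anonymous children (such as Monitors)
// are elaborated under a real clock and reset.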
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
  * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
  * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
 * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
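// A minimal sketch of how the negotiated ports of a MixedNode are consumed
// (illustrative only, mirroring the pattern used elsewhere in this document):
//
//   lazy val module = new LazyModuleImp(this) {
//     (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
//       out <> in   // bundles and edges are only valid once instantiate() has run
//     }
//   }
//
// Calling node.in or node.out before the parent LazyModule.module has begun
// elaboration trips the `instantiated` require checks above.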
File AsyncResetReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
/** This black-boxes an Async Reset
* (or Set)
* Register.
*
* Because Chisel doesn't support
* parameterized black boxes,
* we unfortunately have to
* instantiate a number of these.
*
* We also have to hard-code the set/
* reset behavior.
*
* Do not confuse an asynchronous
* reset signal with an asynchronously
* reset reg. You should still
* properly synchronize your reset
* deassertion.
*
* @param d Data input
* @param q Data Output
* @param clk Clock Input
* @param rst Reset Input
* @param en Write Enable Input
*
*/
class AsyncResetReg(resetValue: Int = 0) extends RawModule {
val io = IO(new Bundle {
val d = Input(Bool())
val q = Output(Bool())
val en = Input(Bool())
val clk = Input(Clock())
val rst = Input(Reset())
})
val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
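// Illustrative direct instantiation of the register above (a sketch, not part of
// the original file; `someBool` is a placeholder signal):
//
//   val r = Module(new AsyncResetReg(resetValue = 1))
//   r.io.clk := clock
//   r.io.rst := reset
//   r.io.en  := true.B
//   r.io.d   := someBool
//   val q = r.io.q   // asynchronously resets to 1, then follows d each cycle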
class SimpleRegIO(val w: Int) extends Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
}
class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module {
override def desiredName = s"AsyncResetRegVec_w${w}_i${init}"
val io = IO(new SimpleRegIO(w))
val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
object AsyncResetReg {
// Create Single Registers
def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = {
val reg = Module(new AsyncResetReg(if (init) 1 else 0))
reg.io.d := d
reg.io.clk := clk
reg.io.rst := rst
reg.io.en := true.B
name.foreach(reg.suggestName(_))
reg.io.q
}
def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None)
def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name))
// Create Vectors of Registers
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = {
val w = updateData.getWidth max resetData.bitLength
val reg = Module(new AsyncResetRegVec(w, resetData))
name.foreach(reg.suggestName(_))
reg.io.d := updateData
reg.io.en := enable
reg.io.q
}
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData,
resetData, enable, Some(name))
def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B)
def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name))
def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable)
def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name))
def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B)
def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name))
}
| module IntSyncCrossingSource_n1x1_38( // @[Crossing.scala:41:9]
input clock, // @[Crossing.scala:41:9]
input reset, // @[Crossing.scala:41:9]
input auto_in_0, // @[LazyModuleImp.scala:107:25]
output auto_out_sync_0 // @[LazyModuleImp.scala:107:25]
);
wire auto_in_0_0 = auto_in_0; // @[Crossing.scala:41:9]
wire nodeIn_0 = auto_in_0_0; // @[Crossing.scala:41:9]
wire nodeOut_sync_0; // @[MixedNode.scala:542:17]
wire auto_out_sync_0_0; // @[Crossing.scala:41:9]
assign auto_out_sync_0_0 = nodeOut_sync_0; // @[Crossing.scala:41:9]
AsyncResetRegVec_w1_i0_38 reg_0 ( // @[AsyncResetReg.scala:86:21]
.clock (clock),
.reset (reset),
.io_d (nodeIn_0), // @[MixedNode.scala:551:17]
.io_q (nodeOut_sync_0)
); // @[AsyncResetReg.scala:86:21]
assign auto_out_sync_0 = auto_out_sync_0_0; // @[Crossing.scala:41:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
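// Worked example for the Seq helpers above (illustrative, not in the original file):
// for val s = Seq(a, b, c, d), s.rotate(1) is Seq(b, c, d, a) and s.rotateRight(1)
// is Seq(d, a, b, c); the UInt-amount variants build the same result out of
// log2Ceil(s.size) mux stages and therefore require a power-of-2 size.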
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
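// Illustrative use of the Seq[Bool] operators above (a sketch, not in the original file):
//
//   val a = Seq(true.B, false.B, true.B)
//   val b = Seq(true.B, true.B)
//   val both = a & b              // element-wise AND over the zipped prefix
//   val any: Bool = (a | b).orR   // `|` pads the shorter Seq with false.B first
//   val none: Bool = !a.orR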
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
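// Worked examples for the UInt helpers above (illustrative, not in the original file):
// for x = "b1001".U(4.W), x.rotateRight(1) gives "b1100".U and x.rotateLeft(1) gives
// "b0011".U; 6.U.addWrap(5.U, 8) gives 3.U, since the power-of-2 case simply truncates
// (6 + 5) to log2(8) = 3 bits; and 2.U.subWrap(5.U, 8) gives 5.U ((2 - 5) mod 8).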
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
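// Worked example for the OH1 helpers above (illustrative): a "one-hot minus one"
// value encodes a power of two as its predecessor, so for x = "b0011".U,
// OH1ToOH(x) gives "b0100".U, OH1ToUInt(x) gives 2.U, and UIntToOH1(2.U, 4) gives
// "b0011".U again. trailingZeros(8) is Some(3) and trailingZeros(0) is None.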
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
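// Worked example (illustrative): leftOR("b0100".U) gives "b1100".U (every bit at or
// above the lowest set bit becomes 1), while rightOR("b0100".U) gives "b0111".U
// (every bit at or below the highest set bit becomes 1). The optional `cap`
// bounds the number of OR-tree doubling steps.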
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
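// Illustrative example (not in the original file):
//   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
// returns Seq((1, Seq(1, 3, 5)), (0, Seq(2, 4))), with keys in first-encounter
// order, whereas Seq.groupBy would return an unordered Map.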
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_46( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
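// Editorial example (a minimal sketch, not part of the original file): a hypothetical
// module showing how ShiftRegInit is typically called, here as a two-stage,
// reset-to-zero pipeline with a suggested register name.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Elaborates two RegNext stages (suggested names sync_1 and sync_0), each reset to false.B.
  io.q := ShiftRegInit(io.d, 2, false.B, Some("sync"))
}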
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
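// Editorial example (a minimal sketch, not part of the original file): a hypothetical
// free-running 3-bit Gray counter built from the helper above. Consecutive outputs
// differ in exactly one bit (binary 0,1,2,3 maps to Gray 000,001,011,010).
class GrayCounterExample extends Module {
  val io = IO(new Bundle { val gray = Output(UInt(3.W)) })
  io.gray := GrayCounter(3, increment = true.B, clear = false.B, name = "example_bin")
}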
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen.
// Impossible to write because a dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the
// dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
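// Editorial example (a minimal sketch, not part of the original file): a hypothetical
// wrapper that carries a Decoupled UInt stream between two clock domains using
// ToAsyncBundle on the source side and FromAsyncBundle on the sink side; the AsyncQueue
// class below packages the same idea behind a CrossingIO.
class AsyncHandoffExample extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))
  // Source-side state elaborates under enq_clock/enq_reset; sink-side state under deq_clock/deq_reset.
  val async = withClockAndReset(enq_clock, enq_reset) { ToAsyncBundle(enq) }
  withClockAndReset(deq_clock, deq_reset) { deq <> FromAsyncBundle(async) }
}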
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_58( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_71 io_out_sink_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
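// Editorial note (worked example, hedged): with numTlTxns = 4 and usedWriteIds = b0101,
// ~usedWriteIds = b1010 and leftOR(~usedWriteIds) = b1110; shifting left by one and
// inverting gives b00011, and the final AND yields b0010, i.e. the lowest-numbered free
// write ID (here ID 1) as a one-hot value.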
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
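// Editorial example (a minimal sketch, not part of the original file): a hypothetical
// LazyModule showing where the adapter sits in a diplomacy graph. The AXI4 master
// parameters and node names are illustrative assumptions, and a TLError device must
// still be reachable downstream of the crossbar for elaboration to succeed.
class ExampleAxiFrontEnd(implicit p: Parameters) extends LazyModule {
  val axiMaster = AXI4MasterNode(Seq(AXI4MasterPortParameters(
    masters = Seq(AXI4MasterParameters(name = "example", id = IdRange(0, 16))))))
  val xbar = LazyModule(new TLXbar)
  // The fragmenter provides the aligned, bounded-length requests this adapter requires;
  // up to 4 in-flight reads and 4 in-flight writes are then converted to TileLink.
  xbar.node := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiMaster
  lazy val module = new LazyModuleImp(this) {}
}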
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_214( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
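// Editorial note (worked example, hedged): with n = 12 (not a power of two),
// 9.U.addWrap(5.U, 12) evaluates to 2 (14 wraps past 12) and 3.U.subWrap(7.U, 12)
// evaluates to 8 (the negative intermediate is corrected by adding 12).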
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
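// Editorial note (worked example, hedged): OH1 is a thermometer-style encoding, so
// UIntToOH1(3.U, 8) = b00000111, OH1ToOH(b00000111) = b00001000, and
// OH1ToUInt(b00000111) = 3.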
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
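// Editorial note (worked example, hedged): for x = b00100100,
// leftOR(x) = b11111100 (each set bit smears toward the MSB) and
// rightOR(x) = b00111111 (each set bit smears toward the LSB).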
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
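// Editorial note (hedged): OptimizationBarrier simply passes a value through a dedicated
// module boundary so downstream passes cannot constant-fold or flatten it; a call such as
// OptimizationBarrier(tlbEntryData) (the name is hypothetical) yields pass-through modules
// like the OptimizationBarrier_TLBEntryData instances emitted elsewhere in this document.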
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
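// Editorial note (worked example, hedged):
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) == Seq(1 -> List(1, 3), 0 -> List(2, 4)),
// with keys ordered by first appearance, unlike the unordered Map from Seq.groupBy.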
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_17( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
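// Sizing example: ReservableListBufferParameters(numEntries = 32, numLists = 4, numBeats = 2) gives
// entryBits = 5, listBits = 2, beatBits = 1; the single-element cases are forced to 1 bit purely to
// avoid zero-width wires.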
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
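      // Example: with numTlTxns = 4, a read reserving entry 2 uses source Cat(0, 2) = 2 and a write
      // using free entry 2 uses source Cat(1, 2) = 6, so reads occupy sources 0-3 and writes 4-7,
      // matching the numTlTxns * 2 source IDs advertised per master.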
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
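      // Example (assuming beatBytes = 8): an unsupported read to 0x80000013 is redirected to
      // errorDevAddr | 0x3, keeping only the sub-beat offset bits of the original address.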
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
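      // Example: with numTlTxns = 4 and usedWriteIds = b1011, ~usedWriteIds = b0100, so
      // freeWriteIdOHRaw = b0100 and freeWriteIdIndex = 2 (the lowest-numbered free write ID).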
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
      // Handle cases where the write ack arrives before the write burst is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
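          // For example, a 4-beat INCR burst of 8-byte beats (32 bytes total) must start on a
          // 32-byte boundary, so an equally-sized WRAP burst never actually wraps mid-burst.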
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
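  // Sizing example (assuming beatBytes = 8, a 2-bit resp, a 1-bit last flag, and no D-channel user
  // fields): each ReorderData element is 64 + 2 + 1 = 67 bits, and with numTlTxns = 32 each
  // SyncReadMem is 32 entries x 67 bits, consistent with the dataMems_* wrapper modules emitted below.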
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_492( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
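The exact parameters used to elaborate the generated PE module further below are not stated, so the following is only a sketch of a parameterization consistent with its port widths (8-bit io_in_a, 20-bit io_in_b/io_in_d, 32-bit c1/c2, 5-bit shift, 3-bit io_in_id); PE398Sketch, Dataflow.BOTH, and the value 8 are illustrative assumptions, not values taken from the output.

import chisel3._
import gemmini._

// Hypothetical wrapper: parameter values are inferred from the generated ports, not confirmed by the source.
class PE398Sketch extends Module {
  // SIntArithmetic is resolved implicitly from the Arithmetic companion object.
  val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH, 8))
  val io = IO(chiselTypeOf(pe.io))
  io <> pe.io
}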
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, with ties broken to even
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
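        // Worked example: 11.U >> 2.U yields 3 (2.75 rounds up: point_five = 1, zeros = 1), while
        // 10.U >> 2.U yields 2 (the 2.5 tie has zeros = 0 and ones_digit = 0, so r = 0 and the
        // already-even result is kept).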
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
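        // For a 20-bit target, for example, maxsat = 524287 (0x7FFFF) and minsat = -524288 (-0x80000).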
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
        // We translate our integer to floating-point form so that we can use the hardfloat div/sqrt unit
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
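        // e.g., for Float(8, 24) the bias is 127, so u = 3 gives shift_exp = 124, and shift_fn
        // encodes 2^(-3) = 0.125.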
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
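      // e.g., for Float(8, 24): identity encodes +1.0 (sign 0, exponent = bias, zero fraction) and
      // minimum encodes negative infinity (sign 1, all-ones exponent, zero fraction).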
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_398( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
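// Editor's annotation (not emitted by firtool): _GEN_7 and _GEN_8 sign-extend the 20-bit
// io_in_d input and MAC result to the 32-bit width of the c1/c2 accumulators updated below.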
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
end // always @(posedge)
MacUnit_142 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
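// Illustrative usage sketch (editor's addition, not part of the upstream file): delay a Bool
// by two cycles with a reset value of false, assuming a signal named `din`:
// val dout = ShiftRegInit(din, n = 2, init = false.B, name = Some("din_pipe"))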
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
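// Editor's note (illustrative, not in the upstream file): the returned value is the Gray
// encoding of the running binary count (gray = bin ^ (bin >> 1)), e.g. bin 3'b011 -> gray
// 3'b010 and bin 3'b100 -> gray 3'b110, so consecutive outputs differ in exactly one bit.
// That property is what makes the widx/ridx pointers safe to synchronize bit-by-bit across
// the clock crossing.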
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
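// Illustrative usage sketch (editor's addition; clkA/rstA, clkB/rstB and the producer/consumer
// Decoupled interfaces are assumed names, not part of this file):
// val q = Module(new AsyncQueue(UInt(8.W)))
// q.io.enq_clock := clkA; q.io.enq_reset := rstA
// q.io.deq_clock := clkB; q.io.deq_reset := rstB
// q.io.enq <> producerIO // producer side, clkA domain
// consumerIO <> q.io.deq // consumer side, clkB domain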
| module AsyncValidSync_23( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_33 io_out_source_valid ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
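// Editor's note (illustrative, not in the upstream file): each of the w input bits gets its
// own `sync`-deep chain of 1-bit asynchronously reset flops, so a w=4, sync=3 instance
// elaborates to four independent AsyncResetSynchronizerPrimitiveShiftReg_d3 chains whose
// single-bit outputs are concatenated back into io.q (see the generated
// AsyncResetSynchronizerShiftReg_w4_d3 module later in this document).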
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
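// Illustrative usage sketch (editor's addition; `bitsFromOtherDomain` and `latchEn` are
// assumed names, not part of this file): capture a multi-bit value once its gray-coded
// index has been synchronized, as AsyncQueueSink does for its deq data:
// val captured = ClockCrossingReg(bitsFromOtherDomain, en = latchEn, doInit = false, name = Some("deq_bits_reg"))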
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_16( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_16 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w4_d3_i0_4( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input [3:0] io_d, // @[ShiftReg.scala:36:14]
output [3:0] io_q // @[ShiftReg.scala:36:14]
);
wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_1; // @[ShiftReg.scala:48:24]
wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_2; // @[ShiftReg.scala:48:24]
wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_3; // @[ShiftReg.scala:48:24]
wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_67 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_68 output_chain_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_3), // @[SynchronizerReg.scala:87:41]
.io_q (output_1)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_69 output_chain_2 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_5), // @[SynchronizerReg.scala:87:41]
.io_q (output_2)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_70 output_chain_3 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_7), // @[SynchronizerReg.scala:87:41]
.io_q (output_3)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Atomics.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
class Atomics(params: TLBundleParameters) extends Module
{
val io = IO(new Bundle {
val write = Flipped(Bool()) // ignore opcode
val a = Flipped(new TLBundleA(params))
val data_in = Flipped(UInt(params.dataBits.W))
val data_out = UInt(params.dataBits.W)
})
// Arithmetic, what to do
val adder = io.a.param(2)
val unsigned = io.a.param(1)
val take_max = io.a.param(0)
val signBit = io.a.mask & Cat(1.U, ~io.a.mask >> 1)
val inv_d = Mux(adder, io.data_in, ~io.data_in)
val sum = (FillInterleaved(8, io.a.mask) & io.a.data) + inv_d
def sign(x: UInt): Bool = (Cat(x.asBools.grouped(8).map(_.last).toList.reverse) & signBit).orR
val sign_a = sign(io.a.data)
val sign_d = sign(io.data_in)
val sign_s = sign(sum)
val a_bigger_uneq = unsigned === sign_a // result if high bits are unequal
val a_bigger = Mux(sign_a === sign_d, !sign_s, a_bigger_uneq)
val pick_a = take_max === a_bigger
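// Editor's worked example (illustrative, not in the upstream file): with adder = 0 we get
// inv_d = ~data_in, so per lane sum = a + ~d = a - d - 1 (mod 2^width). When a and d have
// the same sign this cannot overflow, so !sign(sum) is exactly a > d.
// E.g. unsigned MAXU on one byte with a = 0x05, d = 0x03: sum = 0x05 + 0xFC = 0x01,
// sign_s = 0, hence a_bigger = 1 and pick_a = (take_max === a_bigger) = true -> keep a.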
// Logical, what to do
val lut = VecInit(Seq(
(0x6).U, // XOR
(0xe).U, // OR
(0x8).U, // AND
(0xc).U))( // SWAP
io.a.param(1,0))
val logical = Cat((io.a.data.asBools zip io.data_in.asBools).map { case (a, d) =>
lut(Cat(a, d))
}.reverse)
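// Editor's worked example (illustrative, not in the upstream file): the 4-bit LUT entry
// selected by param(1,0) is a per-bit truth table indexed by Cat(a, d). For OR (0xE = 4'b1110):
// Cat(a,d) = 2'b00 -> bit 0 = 0, 2'b01 -> bit 1 = 1, 2'b10 -> bit 2 = 1, 2'b11 -> bit 3 = 1,
// i.e. a | d. Likewise 0x6 yields XOR, 0x8 yields AND, and 0xC ("SWAP") simply returns a.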
// Operation, what to do? (0=d, 1=a, 2=sum, 3=logical)
val select = Mux(io.write, 1.U, VecInit(Seq(
1.U, // PutFullData
1.U, // PutPartialData
Mux(adder, 2.U, Mux(pick_a, 1.U, 0.U)), // ArithmeticData
3.U, // LogicalData
0.U, // Get
0.U, // Hint
0.U, // AcquireBlock
0.U))( // AcquirePerm
io.a.opcode))
// Only the masked bytes can be modified
val selects = io.a.mask.asBools.map(b => Mux(b, select, 0.U))
io.data_out := Cat(selects.zipWithIndex.map { case (s, i) =>
VecInit(Seq(io.data_in, io.a.data, sum, logical).map(_((i + 1) * 8 - 1, i * 8)))(s)
}.reverse)
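// Editor's note (illustrative, not in the upstream file): `select` picks, per A-channel opcode,
// among {0: old data_in, 1: incoming a.data, 2: arithmetic sum, 3: logical result}, and the
// per-byte `selects` force unmasked bytes back to choice 0 so only masked bytes are modified.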
}
| module Atomics_6( // @[Atomics.scala:8:7]
input clock, // @[Atomics.scala:8:7]
input reset, // @[Atomics.scala:8:7]
input io_write, // @[Atomics.scala:10:14]
input [2:0] io_a_opcode, // @[Atomics.scala:10:14]
input [2:0] io_a_param, // @[Atomics.scala:10:14]
input [15:0] io_a_mask, // @[Atomics.scala:10:14]
input [127:0] io_a_data, // @[Atomics.scala:10:14]
input [127:0] io_data_in, // @[Atomics.scala:10:14]
output [127:0] io_data_out // @[Atomics.scala:10:14]
);
wire io_write_0 = io_write; // @[Atomics.scala:8:7]
wire [2:0] io_a_opcode_0 = io_a_opcode; // @[Atomics.scala:8:7]
wire [2:0] io_a_param_0 = io_a_param; // @[Atomics.scala:8:7]
wire [15:0] io_a_mask_0 = io_a_mask; // @[Atomics.scala:8:7]
wire [127:0] io_a_data_0 = io_a_data; // @[Atomics.scala:8:7]
wire [127:0] io_data_in_0 = io_data_in; // @[Atomics.scala:8:7]
wire [3:0][3:0] _GEN = '{4'hC, 4'h8, 4'hE, 4'h6};
wire [3:0] _lut_WIRE_0 = 4'h6; // @[Atomics.scala:34:20]
wire [3:0] _lut_WIRE_1 = 4'hE; // @[Atomics.scala:34:20]
wire [3:0] _lut_WIRE_2 = 4'h8; // @[Atomics.scala:34:20]
wire [3:0] _lut_WIRE_3 = 4'hC; // @[Atomics.scala:34:20]
wire [1:0] _select_WIRE_0 = 2'h1; // @[Atomics.scala:45:42]
wire [1:0] _select_WIRE_1 = 2'h1; // @[Atomics.scala:45:42]
wire [1:0] _select_WIRE_3 = 2'h3; // @[Atomics.scala:45:42]
wire [1:0] _select_WIRE_4 = 2'h0; // @[Atomics.scala:45:42]
wire [1:0] _select_WIRE_5 = 2'h0; // @[Atomics.scala:45:42]
wire [1:0] _select_WIRE_6 = 2'h0; // @[Atomics.scala:45:42]
wire [1:0] _select_WIRE_7 = 2'h0; // @[Atomics.scala:45:42]
wire io_a_corrupt = 1'h0; // @[Atomics.scala:8:7, :10:14]
wire [31:0] io_a_address = 32'h0; // @[Atomics.scala:8:7, :10:14]
wire [5:0] io_a_source = 6'h0; // @[Atomics.scala:8:7, :10:14]
wire [2:0] io_a_size = 3'h0; // @[Atomics.scala:8:7, :10:14]
wire [127:0] _io_data_out_T_64; // @[Atomics.scala:58:21]
wire [127:0] io_data_out_0; // @[Atomics.scala:8:7]
wire adder = io_a_param_0[2]; // @[Atomics.scala:8:7, :18:28]
wire unsigned_0 = io_a_param_0[1]; // @[Atomics.scala:8:7, :19:28]
wire take_max = io_a_param_0[0]; // @[Atomics.scala:8:7, :20:28]
wire [15:0] _signBit_T = ~io_a_mask_0; // @[Atomics.scala:8:7, :22:38]
wire [14:0] _signBit_T_1 = _signBit_T[15:1]; // @[Atomics.scala:22:{38,49}]
wire [15:0] _signBit_T_2 = {1'h1, _signBit_T_1}; // @[Atomics.scala:22:{32,49}]
wire [15:0] signBit = io_a_mask_0 & _signBit_T_2; // @[Atomics.scala:8:7, :22:{27,32}]
wire [127:0] _inv_d_T = ~io_data_in_0; // @[Atomics.scala:8:7, :23:38]
wire [127:0] inv_d = adder ? io_data_in_0 : _inv_d_T; // @[Atomics.scala:8:7, :18:28, :23:{18,38}]
wire _sum_T = io_a_mask_0[0]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T = io_a_mask_0[0]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_1 = io_a_mask_0[1]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_1 = io_a_mask_0[1]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_2 = io_a_mask_0[2]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_2 = io_a_mask_0[2]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_3 = io_a_mask_0[3]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_3 = io_a_mask_0[3]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_4 = io_a_mask_0[4]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_4 = io_a_mask_0[4]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_5 = io_a_mask_0[5]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_5 = io_a_mask_0[5]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_6 = io_a_mask_0[6]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_6 = io_a_mask_0[6]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_7 = io_a_mask_0[7]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_7 = io_a_mask_0[7]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_8 = io_a_mask_0[8]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_8 = io_a_mask_0[8]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_9 = io_a_mask_0[9]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_9 = io_a_mask_0[9]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_10 = io_a_mask_0[10]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_10 = io_a_mask_0[10]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_11 = io_a_mask_0[11]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_11 = io_a_mask_0[11]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_12 = io_a_mask_0[12]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_12 = io_a_mask_0[12]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_13 = io_a_mask_0[13]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_13 = io_a_mask_0[13]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_14 = io_a_mask_0[14]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_14 = io_a_mask_0[14]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire _sum_T_15 = io_a_mask_0[15]; // @[Atomics.scala:8:7, :24:29]
wire _selects_T_15 = io_a_mask_0[15]; // @[Atomics.scala:8:7, :24:29, :57:27]
wire [7:0] _sum_T_16 = {8{_sum_T}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_17 = {8{_sum_T_1}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_18 = {8{_sum_T_2}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_19 = {8{_sum_T_3}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_20 = {8{_sum_T_4}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_21 = {8{_sum_T_5}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_22 = {8{_sum_T_6}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_23 = {8{_sum_T_7}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_24 = {8{_sum_T_8}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_25 = {8{_sum_T_9}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_26 = {8{_sum_T_10}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_27 = {8{_sum_T_11}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_28 = {8{_sum_T_12}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_29 = {8{_sum_T_13}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_30 = {8{_sum_T_14}}; // @[Atomics.scala:24:29]
wire [7:0] _sum_T_31 = {8{_sum_T_15}}; // @[Atomics.scala:24:29]
wire [15:0] sum_lo_lo_lo = {_sum_T_17, _sum_T_16}; // @[Atomics.scala:24:29]
wire [15:0] sum_lo_lo_hi = {_sum_T_19, _sum_T_18}; // @[Atomics.scala:24:29]
wire [31:0] sum_lo_lo = {sum_lo_lo_hi, sum_lo_lo_lo}; // @[Atomics.scala:24:29]
wire [15:0] sum_lo_hi_lo = {_sum_T_21, _sum_T_20}; // @[Atomics.scala:24:29]
wire [15:0] sum_lo_hi_hi = {_sum_T_23, _sum_T_22}; // @[Atomics.scala:24:29]
wire [31:0] sum_lo_hi = {sum_lo_hi_hi, sum_lo_hi_lo}; // @[Atomics.scala:24:29]
wire [63:0] sum_lo = {sum_lo_hi, sum_lo_lo}; // @[Atomics.scala:24:29]
wire [15:0] sum_hi_lo_lo = {_sum_T_25, _sum_T_24}; // @[Atomics.scala:24:29]
wire [15:0] sum_hi_lo_hi = {_sum_T_27, _sum_T_26}; // @[Atomics.scala:24:29]
wire [31:0] sum_hi_lo = {sum_hi_lo_hi, sum_hi_lo_lo}; // @[Atomics.scala:24:29]
wire [15:0] sum_hi_hi_lo = {_sum_T_29, _sum_T_28}; // @[Atomics.scala:24:29]
wire [15:0] sum_hi_hi_hi = {_sum_T_31, _sum_T_30}; // @[Atomics.scala:24:29]
wire [31:0] sum_hi_hi = {sum_hi_hi_hi, sum_hi_hi_lo}; // @[Atomics.scala:24:29]
wire [63:0] sum_hi = {sum_hi_hi, sum_hi_lo}; // @[Atomics.scala:24:29]
wire [127:0] _sum_T_32 = {sum_hi, sum_lo}; // @[Atomics.scala:24:29]
wire [127:0] _sum_T_33 = _sum_T_32 & io_a_data_0; // @[Atomics.scala:8:7, :24:{29,44}]
wire [128:0] _sum_T_34 = {1'h0, _sum_T_33} + {1'h0, inv_d}; // @[Atomics.scala:8:7, :10:14, :23:18, :24:{44,57}]
wire [127:0] sum = _sum_T_34[127:0]; // @[Atomics.scala:24:57]
wire _sign_a_T = io_a_data_0[0]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T = io_a_data_0[0]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_1 = io_a_data_0[1]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_1 = io_a_data_0[1]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_2 = io_a_data_0[2]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_2 = io_a_data_0[2]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_3 = io_a_data_0[3]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_3 = io_a_data_0[3]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_4 = io_a_data_0[4]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_4 = io_a_data_0[4]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_5 = io_a_data_0[5]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_5 = io_a_data_0[5]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_6 = io_a_data_0[6]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_6 = io_a_data_0[6]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_7 = io_a_data_0[7]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_7 = io_a_data_0[7]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_8 = io_a_data_0[8]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_8 = io_a_data_0[8]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_9 = io_a_data_0[9]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_9 = io_a_data_0[9]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_10 = io_a_data_0[10]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_10 = io_a_data_0[10]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_11 = io_a_data_0[11]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_11 = io_a_data_0[11]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_12 = io_a_data_0[12]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_12 = io_a_data_0[12]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_13 = io_a_data_0[13]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_13 = io_a_data_0[13]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_14 = io_a_data_0[14]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_14 = io_a_data_0[14]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_15 = io_a_data_0[15]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_15 = io_a_data_0[15]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_16 = io_a_data_0[16]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_16 = io_a_data_0[16]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_17 = io_a_data_0[17]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_17 = io_a_data_0[17]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_18 = io_a_data_0[18]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_18 = io_a_data_0[18]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_19 = io_a_data_0[19]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_19 = io_a_data_0[19]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_20 = io_a_data_0[20]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_20 = io_a_data_0[20]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_21 = io_a_data_0[21]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_21 = io_a_data_0[21]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_22 = io_a_data_0[22]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_22 = io_a_data_0[22]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_23 = io_a_data_0[23]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_23 = io_a_data_0[23]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_24 = io_a_data_0[24]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_24 = io_a_data_0[24]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_25 = io_a_data_0[25]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_25 = io_a_data_0[25]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_26 = io_a_data_0[26]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_26 = io_a_data_0[26]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_27 = io_a_data_0[27]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_27 = io_a_data_0[27]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_28 = io_a_data_0[28]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_28 = io_a_data_0[28]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_29 = io_a_data_0[29]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_29 = io_a_data_0[29]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_30 = io_a_data_0[30]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_30 = io_a_data_0[30]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_31 = io_a_data_0[31]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_31 = io_a_data_0[31]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_32 = io_a_data_0[32]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_32 = io_a_data_0[32]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_33 = io_a_data_0[33]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_33 = io_a_data_0[33]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_34 = io_a_data_0[34]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_34 = io_a_data_0[34]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_35 = io_a_data_0[35]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_35 = io_a_data_0[35]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_36 = io_a_data_0[36]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_36 = io_a_data_0[36]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_37 = io_a_data_0[37]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_37 = io_a_data_0[37]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_38 = io_a_data_0[38]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_38 = io_a_data_0[38]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_39 = io_a_data_0[39]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_39 = io_a_data_0[39]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_40 = io_a_data_0[40]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_40 = io_a_data_0[40]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_41 = io_a_data_0[41]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_41 = io_a_data_0[41]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_42 = io_a_data_0[42]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_42 = io_a_data_0[42]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_43 = io_a_data_0[43]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_43 = io_a_data_0[43]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_44 = io_a_data_0[44]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_44 = io_a_data_0[44]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_45 = io_a_data_0[45]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_45 = io_a_data_0[45]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_46 = io_a_data_0[46]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_46 = io_a_data_0[46]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_47 = io_a_data_0[47]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_47 = io_a_data_0[47]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_48 = io_a_data_0[48]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_48 = io_a_data_0[48]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_49 = io_a_data_0[49]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_49 = io_a_data_0[49]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_50 = io_a_data_0[50]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_50 = io_a_data_0[50]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_51 = io_a_data_0[51]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_51 = io_a_data_0[51]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_52 = io_a_data_0[52]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_52 = io_a_data_0[52]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_53 = io_a_data_0[53]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_53 = io_a_data_0[53]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_54 = io_a_data_0[54]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_54 = io_a_data_0[54]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_55 = io_a_data_0[55]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_55 = io_a_data_0[55]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_56 = io_a_data_0[56]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_56 = io_a_data_0[56]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_57 = io_a_data_0[57]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_57 = io_a_data_0[57]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_58 = io_a_data_0[58]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_58 = io_a_data_0[58]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_59 = io_a_data_0[59]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_59 = io_a_data_0[59]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_60 = io_a_data_0[60]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_60 = io_a_data_0[60]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_61 = io_a_data_0[61]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_61 = io_a_data_0[61]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_62 = io_a_data_0[62]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_62 = io_a_data_0[62]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_63 = io_a_data_0[63]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_63 = io_a_data_0[63]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_64 = io_a_data_0[64]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_64 = io_a_data_0[64]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_65 = io_a_data_0[65]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_65 = io_a_data_0[65]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_66 = io_a_data_0[66]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_66 = io_a_data_0[66]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_67 = io_a_data_0[67]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_67 = io_a_data_0[67]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_68 = io_a_data_0[68]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_68 = io_a_data_0[68]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_69 = io_a_data_0[69]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_69 = io_a_data_0[69]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_70 = io_a_data_0[70]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_70 = io_a_data_0[70]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_71 = io_a_data_0[71]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_71 = io_a_data_0[71]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_72 = io_a_data_0[72]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_72 = io_a_data_0[72]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_73 = io_a_data_0[73]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_73 = io_a_data_0[73]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_74 = io_a_data_0[74]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_74 = io_a_data_0[74]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_75 = io_a_data_0[75]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_75 = io_a_data_0[75]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_76 = io_a_data_0[76]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_76 = io_a_data_0[76]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_77 = io_a_data_0[77]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_77 = io_a_data_0[77]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_78 = io_a_data_0[78]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_78 = io_a_data_0[78]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_79 = io_a_data_0[79]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_79 = io_a_data_0[79]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_80 = io_a_data_0[80]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_80 = io_a_data_0[80]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_81 = io_a_data_0[81]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_81 = io_a_data_0[81]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_82 = io_a_data_0[82]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_82 = io_a_data_0[82]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_83 = io_a_data_0[83]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_83 = io_a_data_0[83]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_84 = io_a_data_0[84]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_84 = io_a_data_0[84]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_85 = io_a_data_0[85]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_85 = io_a_data_0[85]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_86 = io_a_data_0[86]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_86 = io_a_data_0[86]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_87 = io_a_data_0[87]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_87 = io_a_data_0[87]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_88 = io_a_data_0[88]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_88 = io_a_data_0[88]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_89 = io_a_data_0[89]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_89 = io_a_data_0[89]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_90 = io_a_data_0[90]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_90 = io_a_data_0[90]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_91 = io_a_data_0[91]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_91 = io_a_data_0[91]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_92 = io_a_data_0[92]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_92 = io_a_data_0[92]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_93 = io_a_data_0[93]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_93 = io_a_data_0[93]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_94 = io_a_data_0[94]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_94 = io_a_data_0[94]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_95 = io_a_data_0[95]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_95 = io_a_data_0[95]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_96 = io_a_data_0[96]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_96 = io_a_data_0[96]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_97 = io_a_data_0[97]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_97 = io_a_data_0[97]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_98 = io_a_data_0[98]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_98 = io_a_data_0[98]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_99 = io_a_data_0[99]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_99 = io_a_data_0[99]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_100 = io_a_data_0[100]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_100 = io_a_data_0[100]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_101 = io_a_data_0[101]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_101 = io_a_data_0[101]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_102 = io_a_data_0[102]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_102 = io_a_data_0[102]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_103 = io_a_data_0[103]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_103 = io_a_data_0[103]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_104 = io_a_data_0[104]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_104 = io_a_data_0[104]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_105 = io_a_data_0[105]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_105 = io_a_data_0[105]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_106 = io_a_data_0[106]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_106 = io_a_data_0[106]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_107 = io_a_data_0[107]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_107 = io_a_data_0[107]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_108 = io_a_data_0[108]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_108 = io_a_data_0[108]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_109 = io_a_data_0[109]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_109 = io_a_data_0[109]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_110 = io_a_data_0[110]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_110 = io_a_data_0[110]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_111 = io_a_data_0[111]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_111 = io_a_data_0[111]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_112 = io_a_data_0[112]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_112 = io_a_data_0[112]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_113 = io_a_data_0[113]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_113 = io_a_data_0[113]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_114 = io_a_data_0[114]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_114 = io_a_data_0[114]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_115 = io_a_data_0[115]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_115 = io_a_data_0[115]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_116 = io_a_data_0[116]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_116 = io_a_data_0[116]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_117 = io_a_data_0[117]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_117 = io_a_data_0[117]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_118 = io_a_data_0[118]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_118 = io_a_data_0[118]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_119 = io_a_data_0[119]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_119 = io_a_data_0[119]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_120 = io_a_data_0[120]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_120 = io_a_data_0[120]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_121 = io_a_data_0[121]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_121 = io_a_data_0[121]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_122 = io_a_data_0[122]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_122 = io_a_data_0[122]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_123 = io_a_data_0[123]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_123 = io_a_data_0[123]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_124 = io_a_data_0[124]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_124 = io_a_data_0[124]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_125 = io_a_data_0[125]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_125 = io_a_data_0[125]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_126 = io_a_data_0[126]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_126 = io_a_data_0[126]; // @[Atomics.scala:8:7, :25:36, :40:32]
wire _sign_a_T_127 = io_a_data_0[127]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_127 = io_a_data_0[127]; // @[Atomics.scala:8:7, :25:36, :40:32]
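  // sign_a: only the most-significant bit of each byte lane (bits 7, 15, ..., 127) is concatenated
  // here and masked with signBit, selecting the operand-size-dependent sign position for the
  // signed comparison (Atomics.scala:25).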
wire [1:0] sign_a_lo_lo_lo = {_sign_a_T_15, _sign_a_T_7}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_a_lo_lo_hi = {_sign_a_T_31, _sign_a_T_23}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_a_lo_lo = {sign_a_lo_lo_hi, sign_a_lo_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_a_lo_hi_lo = {_sign_a_T_47, _sign_a_T_39}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_a_lo_hi_hi = {_sign_a_T_63, _sign_a_T_55}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_a_lo_hi = {sign_a_lo_hi_hi, sign_a_lo_hi_lo}; // @[Atomics.scala:25:33]
wire [7:0] sign_a_lo = {sign_a_lo_hi, sign_a_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_a_hi_lo_lo = {_sign_a_T_79, _sign_a_T_71}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_a_hi_lo_hi = {_sign_a_T_95, _sign_a_T_87}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_a_hi_lo = {sign_a_hi_lo_hi, sign_a_hi_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_a_hi_hi_lo = {_sign_a_T_111, _sign_a_T_103}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_a_hi_hi_hi = {_sign_a_T_127, _sign_a_T_119}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_a_hi_hi = {sign_a_hi_hi_hi, sign_a_hi_hi_lo}; // @[Atomics.scala:25:33]
wire [7:0] sign_a_hi = {sign_a_hi_hi, sign_a_hi_lo}; // @[Atomics.scala:25:33]
wire [15:0] _sign_a_T_128 = {sign_a_hi, sign_a_lo}; // @[Atomics.scala:25:33]
wire [15:0] _sign_a_T_129 = _sign_a_T_128 & signBit; // @[Atomics.scala:22:27, :25:{33,83}]
wire sign_a = |_sign_a_T_129; // @[Atomics.scala:25:{83,94}]
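  // Same per-bit slicing, now for io_data_in (the old memory data); these bits feed sign_d and the
  // second operand of the logical lookup index (Atomics.scala:25, :40).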
wire _sign_d_T = io_data_in_0[0]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_128 = io_data_in_0[0]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_1 = io_data_in_0[1]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_129 = io_data_in_0[1]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_2 = io_data_in_0[2]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_130 = io_data_in_0[2]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_3 = io_data_in_0[3]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_131 = io_data_in_0[3]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_4 = io_data_in_0[4]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_132 = io_data_in_0[4]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_5 = io_data_in_0[5]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_133 = io_data_in_0[5]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_6 = io_data_in_0[6]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_134 = io_data_in_0[6]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_7 = io_data_in_0[7]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_135 = io_data_in_0[7]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_8 = io_data_in_0[8]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_136 = io_data_in_0[8]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_9 = io_data_in_0[9]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_137 = io_data_in_0[9]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_10 = io_data_in_0[10]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_138 = io_data_in_0[10]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_11 = io_data_in_0[11]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_139 = io_data_in_0[11]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_12 = io_data_in_0[12]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_140 = io_data_in_0[12]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_13 = io_data_in_0[13]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_141 = io_data_in_0[13]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_14 = io_data_in_0[14]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_142 = io_data_in_0[14]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_15 = io_data_in_0[15]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_143 = io_data_in_0[15]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_16 = io_data_in_0[16]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_144 = io_data_in_0[16]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_17 = io_data_in_0[17]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_145 = io_data_in_0[17]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_18 = io_data_in_0[18]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_146 = io_data_in_0[18]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_19 = io_data_in_0[19]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_147 = io_data_in_0[19]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_20 = io_data_in_0[20]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_148 = io_data_in_0[20]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_21 = io_data_in_0[21]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_149 = io_data_in_0[21]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_22 = io_data_in_0[22]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_150 = io_data_in_0[22]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_23 = io_data_in_0[23]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_151 = io_data_in_0[23]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_24 = io_data_in_0[24]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_152 = io_data_in_0[24]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_25 = io_data_in_0[25]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_153 = io_data_in_0[25]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_26 = io_data_in_0[26]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_154 = io_data_in_0[26]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_27 = io_data_in_0[27]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_155 = io_data_in_0[27]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_28 = io_data_in_0[28]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_156 = io_data_in_0[28]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_29 = io_data_in_0[29]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_157 = io_data_in_0[29]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_30 = io_data_in_0[30]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_158 = io_data_in_0[30]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_31 = io_data_in_0[31]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_159 = io_data_in_0[31]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_32 = io_data_in_0[32]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_160 = io_data_in_0[32]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_33 = io_data_in_0[33]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_161 = io_data_in_0[33]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_34 = io_data_in_0[34]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_162 = io_data_in_0[34]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_35 = io_data_in_0[35]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_163 = io_data_in_0[35]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_36 = io_data_in_0[36]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_164 = io_data_in_0[36]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_37 = io_data_in_0[37]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_165 = io_data_in_0[37]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_38 = io_data_in_0[38]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_166 = io_data_in_0[38]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_39 = io_data_in_0[39]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_167 = io_data_in_0[39]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_40 = io_data_in_0[40]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_168 = io_data_in_0[40]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_41 = io_data_in_0[41]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_169 = io_data_in_0[41]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_42 = io_data_in_0[42]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_170 = io_data_in_0[42]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_43 = io_data_in_0[43]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_171 = io_data_in_0[43]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_44 = io_data_in_0[44]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_172 = io_data_in_0[44]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_45 = io_data_in_0[45]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_173 = io_data_in_0[45]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_46 = io_data_in_0[46]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_174 = io_data_in_0[46]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_47 = io_data_in_0[47]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_175 = io_data_in_0[47]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_48 = io_data_in_0[48]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_176 = io_data_in_0[48]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_49 = io_data_in_0[49]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_177 = io_data_in_0[49]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_50 = io_data_in_0[50]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_178 = io_data_in_0[50]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_51 = io_data_in_0[51]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_179 = io_data_in_0[51]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_52 = io_data_in_0[52]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_180 = io_data_in_0[52]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_53 = io_data_in_0[53]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_181 = io_data_in_0[53]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_54 = io_data_in_0[54]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_182 = io_data_in_0[54]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_55 = io_data_in_0[55]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_183 = io_data_in_0[55]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_56 = io_data_in_0[56]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_184 = io_data_in_0[56]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_57 = io_data_in_0[57]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_185 = io_data_in_0[57]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_58 = io_data_in_0[58]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_186 = io_data_in_0[58]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_59 = io_data_in_0[59]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_187 = io_data_in_0[59]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_60 = io_data_in_0[60]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_188 = io_data_in_0[60]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_61 = io_data_in_0[61]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_189 = io_data_in_0[61]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_62 = io_data_in_0[62]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_190 = io_data_in_0[62]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_63 = io_data_in_0[63]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_191 = io_data_in_0[63]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_64 = io_data_in_0[64]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_192 = io_data_in_0[64]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_65 = io_data_in_0[65]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_193 = io_data_in_0[65]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_66 = io_data_in_0[66]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_194 = io_data_in_0[66]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_67 = io_data_in_0[67]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_195 = io_data_in_0[67]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_68 = io_data_in_0[68]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_196 = io_data_in_0[68]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_69 = io_data_in_0[69]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_197 = io_data_in_0[69]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_70 = io_data_in_0[70]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_198 = io_data_in_0[70]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_71 = io_data_in_0[71]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_199 = io_data_in_0[71]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_72 = io_data_in_0[72]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_200 = io_data_in_0[72]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_73 = io_data_in_0[73]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_201 = io_data_in_0[73]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_74 = io_data_in_0[74]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_202 = io_data_in_0[74]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_75 = io_data_in_0[75]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_203 = io_data_in_0[75]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_76 = io_data_in_0[76]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_204 = io_data_in_0[76]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_77 = io_data_in_0[77]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_205 = io_data_in_0[77]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_78 = io_data_in_0[78]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_206 = io_data_in_0[78]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_79 = io_data_in_0[79]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_207 = io_data_in_0[79]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_80 = io_data_in_0[80]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_208 = io_data_in_0[80]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_81 = io_data_in_0[81]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_209 = io_data_in_0[81]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_82 = io_data_in_0[82]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_210 = io_data_in_0[82]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_83 = io_data_in_0[83]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_211 = io_data_in_0[83]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_84 = io_data_in_0[84]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_212 = io_data_in_0[84]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_85 = io_data_in_0[85]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_213 = io_data_in_0[85]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_86 = io_data_in_0[86]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_214 = io_data_in_0[86]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_87 = io_data_in_0[87]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_215 = io_data_in_0[87]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_88 = io_data_in_0[88]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_216 = io_data_in_0[88]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_89 = io_data_in_0[89]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_217 = io_data_in_0[89]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_90 = io_data_in_0[90]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_218 = io_data_in_0[90]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_91 = io_data_in_0[91]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_219 = io_data_in_0[91]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_92 = io_data_in_0[92]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_220 = io_data_in_0[92]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_93 = io_data_in_0[93]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_221 = io_data_in_0[93]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_94 = io_data_in_0[94]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_222 = io_data_in_0[94]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_95 = io_data_in_0[95]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_223 = io_data_in_0[95]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_96 = io_data_in_0[96]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_224 = io_data_in_0[96]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_97 = io_data_in_0[97]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_225 = io_data_in_0[97]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_98 = io_data_in_0[98]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_226 = io_data_in_0[98]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_99 = io_data_in_0[99]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_227 = io_data_in_0[99]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_100 = io_data_in_0[100]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_228 = io_data_in_0[100]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_101 = io_data_in_0[101]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_229 = io_data_in_0[101]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_102 = io_data_in_0[102]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_230 = io_data_in_0[102]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_103 = io_data_in_0[103]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_231 = io_data_in_0[103]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_104 = io_data_in_0[104]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_232 = io_data_in_0[104]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_105 = io_data_in_0[105]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_233 = io_data_in_0[105]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_106 = io_data_in_0[106]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_234 = io_data_in_0[106]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_107 = io_data_in_0[107]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_235 = io_data_in_0[107]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_108 = io_data_in_0[108]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_236 = io_data_in_0[108]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_109 = io_data_in_0[109]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_237 = io_data_in_0[109]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_110 = io_data_in_0[110]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_238 = io_data_in_0[110]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_111 = io_data_in_0[111]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_239 = io_data_in_0[111]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_112 = io_data_in_0[112]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_240 = io_data_in_0[112]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_113 = io_data_in_0[113]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_241 = io_data_in_0[113]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_114 = io_data_in_0[114]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_242 = io_data_in_0[114]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_115 = io_data_in_0[115]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_243 = io_data_in_0[115]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_116 = io_data_in_0[116]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_244 = io_data_in_0[116]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_117 = io_data_in_0[117]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_245 = io_data_in_0[117]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_118 = io_data_in_0[118]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_246 = io_data_in_0[118]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_119 = io_data_in_0[119]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_247 = io_data_in_0[119]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_120 = io_data_in_0[120]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_248 = io_data_in_0[120]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_121 = io_data_in_0[121]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_249 = io_data_in_0[121]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_122 = io_data_in_0[122]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_250 = io_data_in_0[122]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_123 = io_data_in_0[123]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_251 = io_data_in_0[123]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_124 = io_data_in_0[124]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_252 = io_data_in_0[124]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_125 = io_data_in_0[125]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_253 = io_data_in_0[125]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_126 = io_data_in_0[126]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_254 = io_data_in_0[126]; // @[Atomics.scala:8:7, :25:36, :40:55]
wire _sign_d_T_127 = io_data_in_0[127]; // @[Atomics.scala:8:7, :25:36]
wire _logical_T_255 = io_data_in_0[127]; // @[Atomics.scala:8:7, :25:36, :40:55]
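  // sign_d: per-byte MSB candidates of io_data_in, masked with signBit exactly as for sign_a.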
wire [1:0] sign_d_lo_lo_lo = {_sign_d_T_15, _sign_d_T_7}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_d_lo_lo_hi = {_sign_d_T_31, _sign_d_T_23}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_d_lo_lo = {sign_d_lo_lo_hi, sign_d_lo_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_d_lo_hi_lo = {_sign_d_T_47, _sign_d_T_39}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_d_lo_hi_hi = {_sign_d_T_63, _sign_d_T_55}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_d_lo_hi = {sign_d_lo_hi_hi, sign_d_lo_hi_lo}; // @[Atomics.scala:25:33]
wire [7:0] sign_d_lo = {sign_d_lo_hi, sign_d_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_d_hi_lo_lo = {_sign_d_T_79, _sign_d_T_71}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_d_hi_lo_hi = {_sign_d_T_95, _sign_d_T_87}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_d_hi_lo = {sign_d_hi_lo_hi, sign_d_hi_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_d_hi_hi_lo = {_sign_d_T_111, _sign_d_T_103}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_d_hi_hi_hi = {_sign_d_T_127, _sign_d_T_119}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_d_hi_hi = {sign_d_hi_hi_hi, sign_d_hi_hi_lo}; // @[Atomics.scala:25:33]
wire [7:0] sign_d_hi = {sign_d_hi_hi, sign_d_hi_lo}; // @[Atomics.scala:25:33]
wire [15:0] _sign_d_T_128 = {sign_d_hi, sign_d_lo}; // @[Atomics.scala:25:33]
wire [15:0] _sign_d_T_129 = _sign_d_T_128 & signBit; // @[Atomics.scala:22:27, :25:{33,83}]
wire sign_d = |_sign_d_T_129; // @[Atomics.scala:25:{83,94}]
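  // Per-bit slices of 'sum'; only the per-byte MSBs are reduced into sign_s below, which appears
  // to give the sign of the per-lane comparison result used by MIN/MAX (Atomics.scala:25).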
wire _sign_s_T = sum[0]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_1 = sum[1]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_2 = sum[2]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_3 = sum[3]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_4 = sum[4]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_5 = sum[5]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_6 = sum[6]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_7 = sum[7]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_8 = sum[8]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_9 = sum[9]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_10 = sum[10]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_11 = sum[11]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_12 = sum[12]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_13 = sum[13]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_14 = sum[14]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_15 = sum[15]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_16 = sum[16]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_17 = sum[17]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_18 = sum[18]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_19 = sum[19]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_20 = sum[20]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_21 = sum[21]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_22 = sum[22]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_23 = sum[23]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_24 = sum[24]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_25 = sum[25]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_26 = sum[26]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_27 = sum[27]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_28 = sum[28]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_29 = sum[29]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_30 = sum[30]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_31 = sum[31]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_32 = sum[32]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_33 = sum[33]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_34 = sum[34]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_35 = sum[35]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_36 = sum[36]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_37 = sum[37]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_38 = sum[38]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_39 = sum[39]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_40 = sum[40]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_41 = sum[41]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_42 = sum[42]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_43 = sum[43]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_44 = sum[44]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_45 = sum[45]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_46 = sum[46]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_47 = sum[47]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_48 = sum[48]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_49 = sum[49]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_50 = sum[50]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_51 = sum[51]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_52 = sum[52]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_53 = sum[53]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_54 = sum[54]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_55 = sum[55]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_56 = sum[56]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_57 = sum[57]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_58 = sum[58]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_59 = sum[59]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_60 = sum[60]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_61 = sum[61]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_62 = sum[62]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_63 = sum[63]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_64 = sum[64]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_65 = sum[65]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_66 = sum[66]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_67 = sum[67]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_68 = sum[68]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_69 = sum[69]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_70 = sum[70]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_71 = sum[71]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_72 = sum[72]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_73 = sum[73]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_74 = sum[74]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_75 = sum[75]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_76 = sum[76]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_77 = sum[77]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_78 = sum[78]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_79 = sum[79]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_80 = sum[80]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_81 = sum[81]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_82 = sum[82]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_83 = sum[83]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_84 = sum[84]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_85 = sum[85]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_86 = sum[86]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_87 = sum[87]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_88 = sum[88]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_89 = sum[89]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_90 = sum[90]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_91 = sum[91]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_92 = sum[92]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_93 = sum[93]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_94 = sum[94]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_95 = sum[95]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_96 = sum[96]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_97 = sum[97]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_98 = sum[98]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_99 = sum[99]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_100 = sum[100]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_101 = sum[101]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_102 = sum[102]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_103 = sum[103]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_104 = sum[104]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_105 = sum[105]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_106 = sum[106]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_107 = sum[107]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_108 = sum[108]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_109 = sum[109]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_110 = sum[110]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_111 = sum[111]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_112 = sum[112]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_113 = sum[113]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_114 = sum[114]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_115 = sum[115]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_116 = sum[116]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_117 = sum[117]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_118 = sum[118]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_119 = sum[119]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_120 = sum[120]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_121 = sum[121]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_122 = sum[122]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_123 = sum[123]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_124 = sum[124]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_125 = sum[125]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_126 = sum[126]; // @[Atomics.scala:24:57, :25:36]
wire _sign_s_T_127 = sum[127]; // @[Atomics.scala:24:57, :25:36]
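  // sign_s: per-byte MSBs of sum, masked with signBit, i.e. the sign of the lane covered by the
  // active operand size.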
wire [1:0] sign_s_lo_lo_lo = {_sign_s_T_15, _sign_s_T_7}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_s_lo_lo_hi = {_sign_s_T_31, _sign_s_T_23}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_s_lo_lo = {sign_s_lo_lo_hi, sign_s_lo_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_s_lo_hi_lo = {_sign_s_T_47, _sign_s_T_39}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_s_lo_hi_hi = {_sign_s_T_63, _sign_s_T_55}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_s_lo_hi = {sign_s_lo_hi_hi, sign_s_lo_hi_lo}; // @[Atomics.scala:25:33]
wire [7:0] sign_s_lo = {sign_s_lo_hi, sign_s_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_s_hi_lo_lo = {_sign_s_T_79, _sign_s_T_71}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_s_hi_lo_hi = {_sign_s_T_95, _sign_s_T_87}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_s_hi_lo = {sign_s_hi_lo_hi, sign_s_hi_lo_lo}; // @[Atomics.scala:25:33]
wire [1:0] sign_s_hi_hi_lo = {_sign_s_T_111, _sign_s_T_103}; // @[Atomics.scala:25:{33,36}]
wire [1:0] sign_s_hi_hi_hi = {_sign_s_T_127, _sign_s_T_119}; // @[Atomics.scala:25:{33,36}]
wire [3:0] sign_s_hi_hi = {sign_s_hi_hi_hi, sign_s_hi_hi_lo}; // @[Atomics.scala:25:33]
wire [7:0] sign_s_hi = {sign_s_hi_hi, sign_s_hi_lo}; // @[Atomics.scala:25:33]
wire [15:0] _sign_s_T_128 = {sign_s_hi, sign_s_lo}; // @[Atomics.scala:25:33]
wire [15:0] _sign_s_T_129 = _sign_s_T_128 & signBit; // @[Atomics.scala:22:27, :25:{33,83}]
wire sign_s = |_sign_s_T_129; // @[Atomics.scala:25:{83,94}]
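  // MIN/MAX selection (Atomics.scala:29-31): when the operand signs differ, 'unsigned' decides
  // which value is larger; when they match, the sign of the difference does. pick_a then chooses
  // operand a whenever it is the desired extreme (take_max == a_bigger).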
wire a_bigger_uneq = unsigned_0 == sign_a; // @[Atomics.scala:19:28, :25:94, :29:32]
wire _a_bigger_T = sign_a == sign_d; // @[Atomics.scala:25:94, :30:29]
wire _a_bigger_T_1 = ~sign_s; // @[Atomics.scala:25:94, :30:41]
wire a_bigger = _a_bigger_T ? _a_bigger_T_1 : a_bigger_uneq; // @[Atomics.scala:29:32, :30:{21,29,41}]
wire pick_a = take_max == a_bigger; // @[Atomics.scala:20:28, :30:21, :31:25]
wire _select_T = pick_a; // @[Atomics.scala:31:25, :48:24]
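  // Logical operations (Atomics.scala:39-41): _lut_T selects one of the 4-bit truth tables held in
  // _GEN (presumably defined earlier in the module) from io_a_param, and each result bit is the
  // table entry indexed by the 2-bit pair {a_bit, d_bit}.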
wire [1:0] _lut_T = io_a_param_0[1:0]; // @[Atomics.scala:8:7, :39:15]
wire [1:0] _logical_T_256 = {_logical_T, _logical_T_128}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_257 = _GEN[_lut_T] >> _logical_T_256; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_258 = _logical_T_257[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_259 = {_logical_T_1, _logical_T_129}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_260 = _GEN[_lut_T] >> _logical_T_259; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_261 = _logical_T_260[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_262 = {_logical_T_2, _logical_T_130}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_263 = _GEN[_lut_T] >> _logical_T_262; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_264 = _logical_T_263[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_265 = {_logical_T_3, _logical_T_131}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_266 = _GEN[_lut_T] >> _logical_T_265; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_267 = _logical_T_266[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_268 = {_logical_T_4, _logical_T_132}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_269 = _GEN[_lut_T] >> _logical_T_268; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_270 = _logical_T_269[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_271 = {_logical_T_5, _logical_T_133}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_272 = _GEN[_lut_T] >> _logical_T_271; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_273 = _logical_T_272[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_274 = {_logical_T_6, _logical_T_134}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_275 = _GEN[_lut_T] >> _logical_T_274; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_276 = _logical_T_275[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_277 = {_logical_T_7, _logical_T_135}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_278 = _GEN[_lut_T] >> _logical_T_277; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_279 = _logical_T_278[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_280 = {_logical_T_8, _logical_T_136}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_281 = _GEN[_lut_T] >> _logical_T_280; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_282 = _logical_T_281[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_283 = {_logical_T_9, _logical_T_137}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_284 = _GEN[_lut_T] >> _logical_T_283; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_285 = _logical_T_284[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_286 = {_logical_T_10, _logical_T_138}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_287 = _GEN[_lut_T] >> _logical_T_286; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_288 = _logical_T_287[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_289 = {_logical_T_11, _logical_T_139}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_290 = _GEN[_lut_T] >> _logical_T_289; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_291 = _logical_T_290[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_292 = {_logical_T_12, _logical_T_140}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_293 = _GEN[_lut_T] >> _logical_T_292; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_294 = _logical_T_293[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_295 = {_logical_T_13, _logical_T_141}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_296 = _GEN[_lut_T] >> _logical_T_295; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_297 = _logical_T_296[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_298 = {_logical_T_14, _logical_T_142}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_299 = _GEN[_lut_T] >> _logical_T_298; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_300 = _logical_T_299[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_301 = {_logical_T_15, _logical_T_143}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_302 = _GEN[_lut_T] >> _logical_T_301; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_303 = _logical_T_302[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_304 = {_logical_T_16, _logical_T_144}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_305 = _GEN[_lut_T] >> _logical_T_304; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_306 = _logical_T_305[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_307 = {_logical_T_17, _logical_T_145}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_308 = _GEN[_lut_T] >> _logical_T_307; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_309 = _logical_T_308[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_310 = {_logical_T_18, _logical_T_146}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_311 = _GEN[_lut_T] >> _logical_T_310; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_312 = _logical_T_311[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_313 = {_logical_T_19, _logical_T_147}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_314 = _GEN[_lut_T] >> _logical_T_313; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_315 = _logical_T_314[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_316 = {_logical_T_20, _logical_T_148}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_317 = _GEN[_lut_T] >> _logical_T_316; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_318 = _logical_T_317[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_319 = {_logical_T_21, _logical_T_149}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_320 = _GEN[_lut_T] >> _logical_T_319; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_321 = _logical_T_320[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_322 = {_logical_T_22, _logical_T_150}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_323 = _GEN[_lut_T] >> _logical_T_322; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_324 = _logical_T_323[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_325 = {_logical_T_23, _logical_T_151}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_326 = _GEN[_lut_T] >> _logical_T_325; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_327 = _logical_T_326[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_328 = {_logical_T_24, _logical_T_152}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_329 = _GEN[_lut_T] >> _logical_T_328; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_330 = _logical_T_329[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_331 = {_logical_T_25, _logical_T_153}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_332 = _GEN[_lut_T] >> _logical_T_331; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_333 = _logical_T_332[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_334 = {_logical_T_26, _logical_T_154}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_335 = _GEN[_lut_T] >> _logical_T_334; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_336 = _logical_T_335[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_337 = {_logical_T_27, _logical_T_155}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_338 = _GEN[_lut_T] >> _logical_T_337; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_339 = _logical_T_338[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_340 = {_logical_T_28, _logical_T_156}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_341 = _GEN[_lut_T] >> _logical_T_340; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_342 = _logical_T_341[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_343 = {_logical_T_29, _logical_T_157}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_344 = _GEN[_lut_T] >> _logical_T_343; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_345 = _logical_T_344[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_346 = {_logical_T_30, _logical_T_158}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_347 = _GEN[_lut_T] >> _logical_T_346; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_348 = _logical_T_347[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_349 = {_logical_T_31, _logical_T_159}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_350 = _GEN[_lut_T] >> _logical_T_349; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_351 = _logical_T_350[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_352 = {_logical_T_32, _logical_T_160}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_353 = _GEN[_lut_T] >> _logical_T_352; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_354 = _logical_T_353[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_355 = {_logical_T_33, _logical_T_161}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_356 = _GEN[_lut_T] >> _logical_T_355; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_357 = _logical_T_356[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_358 = {_logical_T_34, _logical_T_162}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_359 = _GEN[_lut_T] >> _logical_T_358; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_360 = _logical_T_359[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_361 = {_logical_T_35, _logical_T_163}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_362 = _GEN[_lut_T] >> _logical_T_361; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_363 = _logical_T_362[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_364 = {_logical_T_36, _logical_T_164}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_365 = _GEN[_lut_T] >> _logical_T_364; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_366 = _logical_T_365[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_367 = {_logical_T_37, _logical_T_165}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_368 = _GEN[_lut_T] >> _logical_T_367; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_369 = _logical_T_368[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_370 = {_logical_T_38, _logical_T_166}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_371 = _GEN[_lut_T] >> _logical_T_370; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_372 = _logical_T_371[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_373 = {_logical_T_39, _logical_T_167}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_374 = _GEN[_lut_T] >> _logical_T_373; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_375 = _logical_T_374[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_376 = {_logical_T_40, _logical_T_168}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_377 = _GEN[_lut_T] >> _logical_T_376; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_378 = _logical_T_377[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_379 = {_logical_T_41, _logical_T_169}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_380 = _GEN[_lut_T] >> _logical_T_379; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_381 = _logical_T_380[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_382 = {_logical_T_42, _logical_T_170}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_383 = _GEN[_lut_T] >> _logical_T_382; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_384 = _logical_T_383[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_385 = {_logical_T_43, _logical_T_171}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_386 = _GEN[_lut_T] >> _logical_T_385; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_387 = _logical_T_386[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_388 = {_logical_T_44, _logical_T_172}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_389 = _GEN[_lut_T] >> _logical_T_388; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_390 = _logical_T_389[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_391 = {_logical_T_45, _logical_T_173}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_392 = _GEN[_lut_T] >> _logical_T_391; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_393 = _logical_T_392[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_394 = {_logical_T_46, _logical_T_174}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_395 = _GEN[_lut_T] >> _logical_T_394; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_396 = _logical_T_395[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_397 = {_logical_T_47, _logical_T_175}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_398 = _GEN[_lut_T] >> _logical_T_397; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_399 = _logical_T_398[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_400 = {_logical_T_48, _logical_T_176}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_401 = _GEN[_lut_T] >> _logical_T_400; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_402 = _logical_T_401[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_403 = {_logical_T_49, _logical_T_177}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_404 = _GEN[_lut_T] >> _logical_T_403; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_405 = _logical_T_404[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_406 = {_logical_T_50, _logical_T_178}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_407 = _GEN[_lut_T] >> _logical_T_406; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_408 = _logical_T_407[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_409 = {_logical_T_51, _logical_T_179}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_410 = _GEN[_lut_T] >> _logical_T_409; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_411 = _logical_T_410[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_412 = {_logical_T_52, _logical_T_180}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_413 = _GEN[_lut_T] >> _logical_T_412; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_414 = _logical_T_413[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_415 = {_logical_T_53, _logical_T_181}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_416 = _GEN[_lut_T] >> _logical_T_415; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_417 = _logical_T_416[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_418 = {_logical_T_54, _logical_T_182}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_419 = _GEN[_lut_T] >> _logical_T_418; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_420 = _logical_T_419[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_421 = {_logical_T_55, _logical_T_183}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_422 = _GEN[_lut_T] >> _logical_T_421; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_423 = _logical_T_422[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_424 = {_logical_T_56, _logical_T_184}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_425 = _GEN[_lut_T] >> _logical_T_424; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_426 = _logical_T_425[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_427 = {_logical_T_57, _logical_T_185}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_428 = _GEN[_lut_T] >> _logical_T_427; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_429 = _logical_T_428[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_430 = {_logical_T_58, _logical_T_186}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_431 = _GEN[_lut_T] >> _logical_T_430; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_432 = _logical_T_431[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_433 = {_logical_T_59, _logical_T_187}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_434 = _GEN[_lut_T] >> _logical_T_433; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_435 = _logical_T_434[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_436 = {_logical_T_60, _logical_T_188}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_437 = _GEN[_lut_T] >> _logical_T_436; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_438 = _logical_T_437[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_439 = {_logical_T_61, _logical_T_189}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_440 = _GEN[_lut_T] >> _logical_T_439; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_441 = _logical_T_440[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_442 = {_logical_T_62, _logical_T_190}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_443 = _GEN[_lut_T] >> _logical_T_442; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_444 = _logical_T_443[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_445 = {_logical_T_63, _logical_T_191}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_446 = _GEN[_lut_T] >> _logical_T_445; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_447 = _logical_T_446[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_448 = {_logical_T_64, _logical_T_192}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_449 = _GEN[_lut_T] >> _logical_T_448; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_450 = _logical_T_449[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_451 = {_logical_T_65, _logical_T_193}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_452 = _GEN[_lut_T] >> _logical_T_451; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_453 = _logical_T_452[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_454 = {_logical_T_66, _logical_T_194}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_455 = _GEN[_lut_T] >> _logical_T_454; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_456 = _logical_T_455[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_457 = {_logical_T_67, _logical_T_195}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_458 = _GEN[_lut_T] >> _logical_T_457; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_459 = _logical_T_458[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_460 = {_logical_T_68, _logical_T_196}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_461 = _GEN[_lut_T] >> _logical_T_460; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_462 = _logical_T_461[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_463 = {_logical_T_69, _logical_T_197}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_464 = _GEN[_lut_T] >> _logical_T_463; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_465 = _logical_T_464[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_466 = {_logical_T_70, _logical_T_198}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_467 = _GEN[_lut_T] >> _logical_T_466; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_468 = _logical_T_467[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_469 = {_logical_T_71, _logical_T_199}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_470 = _GEN[_lut_T] >> _logical_T_469; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_471 = _logical_T_470[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_472 = {_logical_T_72, _logical_T_200}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_473 = _GEN[_lut_T] >> _logical_T_472; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_474 = _logical_T_473[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_475 = {_logical_T_73, _logical_T_201}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_476 = _GEN[_lut_T] >> _logical_T_475; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_477 = _logical_T_476[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_478 = {_logical_T_74, _logical_T_202}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_479 = _GEN[_lut_T] >> _logical_T_478; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_480 = _logical_T_479[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_481 = {_logical_T_75, _logical_T_203}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_482 = _GEN[_lut_T] >> _logical_T_481; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_483 = _logical_T_482[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_484 = {_logical_T_76, _logical_T_204}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_485 = _GEN[_lut_T] >> _logical_T_484; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_486 = _logical_T_485[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_487 = {_logical_T_77, _logical_T_205}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_488 = _GEN[_lut_T] >> _logical_T_487; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_489 = _logical_T_488[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_490 = {_logical_T_78, _logical_T_206}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_491 = _GEN[_lut_T] >> _logical_T_490; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_492 = _logical_T_491[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_493 = {_logical_T_79, _logical_T_207}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_494 = _GEN[_lut_T] >> _logical_T_493; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_495 = _logical_T_494[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_496 = {_logical_T_80, _logical_T_208}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_497 = _GEN[_lut_T] >> _logical_T_496; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_498 = _logical_T_497[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_499 = {_logical_T_81, _logical_T_209}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_500 = _GEN[_lut_T] >> _logical_T_499; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_501 = _logical_T_500[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_502 = {_logical_T_82, _logical_T_210}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_503 = _GEN[_lut_T] >> _logical_T_502; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_504 = _logical_T_503[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_505 = {_logical_T_83, _logical_T_211}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_506 = _GEN[_lut_T] >> _logical_T_505; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_507 = _logical_T_506[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_508 = {_logical_T_84, _logical_T_212}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_509 = _GEN[_lut_T] >> _logical_T_508; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_510 = _logical_T_509[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_511 = {_logical_T_85, _logical_T_213}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_512 = _GEN[_lut_T] >> _logical_T_511; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_513 = _logical_T_512[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_514 = {_logical_T_86, _logical_T_214}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_515 = _GEN[_lut_T] >> _logical_T_514; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_516 = _logical_T_515[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_517 = {_logical_T_87, _logical_T_215}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_518 = _GEN[_lut_T] >> _logical_T_517; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_519 = _logical_T_518[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_520 = {_logical_T_88, _logical_T_216}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_521 = _GEN[_lut_T] >> _logical_T_520; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_522 = _logical_T_521[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_523 = {_logical_T_89, _logical_T_217}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_524 = _GEN[_lut_T] >> _logical_T_523; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_525 = _logical_T_524[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_526 = {_logical_T_90, _logical_T_218}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_527 = _GEN[_lut_T] >> _logical_T_526; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_528 = _logical_T_527[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_529 = {_logical_T_91, _logical_T_219}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_530 = _GEN[_lut_T] >> _logical_T_529; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_531 = _logical_T_530[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_532 = {_logical_T_92, _logical_T_220}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_533 = _GEN[_lut_T] >> _logical_T_532; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_534 = _logical_T_533[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_535 = {_logical_T_93, _logical_T_221}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_536 = _GEN[_lut_T] >> _logical_T_535; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_537 = _logical_T_536[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_538 = {_logical_T_94, _logical_T_222}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_539 = _GEN[_lut_T] >> _logical_T_538; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_540 = _logical_T_539[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_541 = {_logical_T_95, _logical_T_223}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_542 = _GEN[_lut_T] >> _logical_T_541; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_543 = _logical_T_542[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_544 = {_logical_T_96, _logical_T_224}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_545 = _GEN[_lut_T] >> _logical_T_544; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_546 = _logical_T_545[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_547 = {_logical_T_97, _logical_T_225}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_548 = _GEN[_lut_T] >> _logical_T_547; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_549 = _logical_T_548[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_550 = {_logical_T_98, _logical_T_226}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_551 = _GEN[_lut_T] >> _logical_T_550; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_552 = _logical_T_551[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_553 = {_logical_T_99, _logical_T_227}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_554 = _GEN[_lut_T] >> _logical_T_553; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_555 = _logical_T_554[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_556 = {_logical_T_100, _logical_T_228}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_557 = _GEN[_lut_T] >> _logical_T_556; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_558 = _logical_T_557[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_559 = {_logical_T_101, _logical_T_229}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_560 = _GEN[_lut_T] >> _logical_T_559; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_561 = _logical_T_560[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_562 = {_logical_T_102, _logical_T_230}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_563 = _GEN[_lut_T] >> _logical_T_562; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_564 = _logical_T_563[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_565 = {_logical_T_103, _logical_T_231}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_566 = _GEN[_lut_T] >> _logical_T_565; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_567 = _logical_T_566[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_568 = {_logical_T_104, _logical_T_232}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_569 = _GEN[_lut_T] >> _logical_T_568; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_570 = _logical_T_569[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_571 = {_logical_T_105, _logical_T_233}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_572 = _GEN[_lut_T] >> _logical_T_571; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_573 = _logical_T_572[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_574 = {_logical_T_106, _logical_T_234}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_575 = _GEN[_lut_T] >> _logical_T_574; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_576 = _logical_T_575[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_577 = {_logical_T_107, _logical_T_235}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_578 = _GEN[_lut_T] >> _logical_T_577; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_579 = _logical_T_578[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_580 = {_logical_T_108, _logical_T_236}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_581 = _GEN[_lut_T] >> _logical_T_580; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_582 = _logical_T_581[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_583 = {_logical_T_109, _logical_T_237}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_584 = _GEN[_lut_T] >> _logical_T_583; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_585 = _logical_T_584[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_586 = {_logical_T_110, _logical_T_238}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_587 = _GEN[_lut_T] >> _logical_T_586; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_588 = _logical_T_587[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_589 = {_logical_T_111, _logical_T_239}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_590 = _GEN[_lut_T] >> _logical_T_589; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_591 = _logical_T_590[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_592 = {_logical_T_112, _logical_T_240}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_593 = _GEN[_lut_T] >> _logical_T_592; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_594 = _logical_T_593[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_595 = {_logical_T_113, _logical_T_241}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_596 = _GEN[_lut_T] >> _logical_T_595; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_597 = _logical_T_596[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_598 = {_logical_T_114, _logical_T_242}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_599 = _GEN[_lut_T] >> _logical_T_598; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_600 = _logical_T_599[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_601 = {_logical_T_115, _logical_T_243}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_602 = _GEN[_lut_T] >> _logical_T_601; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_603 = _logical_T_602[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_604 = {_logical_T_116, _logical_T_244}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_605 = _GEN[_lut_T] >> _logical_T_604; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_606 = _logical_T_605[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_607 = {_logical_T_117, _logical_T_245}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_608 = _GEN[_lut_T] >> _logical_T_607; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_609 = _logical_T_608[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_610 = {_logical_T_118, _logical_T_246}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_611 = _GEN[_lut_T] >> _logical_T_610; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_612 = _logical_T_611[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_613 = {_logical_T_119, _logical_T_247}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_614 = _GEN[_lut_T] >> _logical_T_613; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_615 = _logical_T_614[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_616 = {_logical_T_120, _logical_T_248}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_617 = _GEN[_lut_T] >> _logical_T_616; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_618 = _logical_T_617[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_619 = {_logical_T_121, _logical_T_249}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_620 = _GEN[_lut_T] >> _logical_T_619; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_621 = _logical_T_620[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_622 = {_logical_T_122, _logical_T_250}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_623 = _GEN[_lut_T] >> _logical_T_622; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_624 = _logical_T_623[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_625 = {_logical_T_123, _logical_T_251}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_626 = _GEN[_lut_T] >> _logical_T_625; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_627 = _logical_T_626[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_628 = {_logical_T_124, _logical_T_252}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_629 = _GEN[_lut_T] >> _logical_T_628; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_630 = _logical_T_629[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_631 = {_logical_T_125, _logical_T_253}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_632 = _GEN[_lut_T] >> _logical_T_631; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_633 = _logical_T_632[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_634 = {_logical_T_126, _logical_T_254}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_635 = _GEN[_lut_T] >> _logical_T_634; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_636 = _logical_T_635[0]; // @[Atomics.scala:41:8]
wire [1:0] _logical_T_637 = {_logical_T_127, _logical_T_255}; // @[Atomics.scala:40:{32,55}, :41:12]
wire [3:0] _logical_T_638 = _GEN[_lut_T] >> _logical_T_637; // @[Atomics.scala:39:15, :41:{8,12}]
wire _logical_T_639 = _logical_T_638[0]; // @[Atomics.scala:41:8]
wire [1:0] logical_lo_lo_lo_lo_lo_lo = {_logical_T_261, _logical_T_258}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_lo_lo_lo_hi = {_logical_T_267, _logical_T_264}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_lo_lo_lo = {logical_lo_lo_lo_lo_lo_hi, logical_lo_lo_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_lo_lo_hi_lo = {_logical_T_273, _logical_T_270}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_lo_lo_hi_hi = {_logical_T_279, _logical_T_276}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_lo_lo_hi = {logical_lo_lo_lo_lo_hi_hi, logical_lo_lo_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_lo_lo_lo = {logical_lo_lo_lo_lo_hi, logical_lo_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_lo_hi_lo_lo = {_logical_T_285, _logical_T_282}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_lo_hi_lo_hi = {_logical_T_291, _logical_T_288}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_lo_hi_lo = {logical_lo_lo_lo_hi_lo_hi, logical_lo_lo_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_lo_hi_hi_lo = {_logical_T_297, _logical_T_294}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_lo_hi_hi_hi = {_logical_T_303, _logical_T_300}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_lo_hi_hi = {logical_lo_lo_lo_hi_hi_hi, logical_lo_lo_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_lo_lo_hi = {logical_lo_lo_lo_hi_hi, logical_lo_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_lo_lo_lo = {logical_lo_lo_lo_hi, logical_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_hi_lo_lo_lo = {_logical_T_309, _logical_T_306}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_hi_lo_lo_hi = {_logical_T_315, _logical_T_312}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_hi_lo_lo = {logical_lo_lo_hi_lo_lo_hi, logical_lo_lo_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_hi_lo_hi_lo = {_logical_T_321, _logical_T_318}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_hi_lo_hi_hi = {_logical_T_327, _logical_T_324}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_hi_lo_hi = {logical_lo_lo_hi_lo_hi_hi, logical_lo_lo_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_lo_hi_lo = {logical_lo_lo_hi_lo_hi, logical_lo_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_hi_hi_lo_lo = {_logical_T_333, _logical_T_330}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_hi_hi_lo_hi = {_logical_T_339, _logical_T_336}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_hi_hi_lo = {logical_lo_lo_hi_hi_lo_hi, logical_lo_lo_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_lo_hi_hi_hi_lo = {_logical_T_345, _logical_T_342}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_lo_hi_hi_hi_hi = {_logical_T_351, _logical_T_348}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_lo_hi_hi_hi = {logical_lo_lo_hi_hi_hi_hi, logical_lo_lo_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_lo_hi_hi = {logical_lo_lo_hi_hi_hi, logical_lo_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_lo_lo_hi = {logical_lo_lo_hi_hi, logical_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [31:0] logical_lo_lo = {logical_lo_lo_hi, logical_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_lo_lo_lo_lo = {_logical_T_357, _logical_T_354}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_lo_lo_lo_hi = {_logical_T_363, _logical_T_360}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_lo_lo_lo = {logical_lo_hi_lo_lo_lo_hi, logical_lo_hi_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_lo_lo_hi_lo = {_logical_T_369, _logical_T_366}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_lo_lo_hi_hi = {_logical_T_375, _logical_T_372}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_lo_lo_hi = {logical_lo_hi_lo_lo_hi_hi, logical_lo_hi_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_hi_lo_lo = {logical_lo_hi_lo_lo_hi, logical_lo_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_lo_hi_lo_lo = {_logical_T_381, _logical_T_378}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_lo_hi_lo_hi = {_logical_T_387, _logical_T_384}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_lo_hi_lo = {logical_lo_hi_lo_hi_lo_hi, logical_lo_hi_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_lo_hi_hi_lo = {_logical_T_393, _logical_T_390}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_lo_hi_hi_hi = {_logical_T_399, _logical_T_396}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_lo_hi_hi = {logical_lo_hi_lo_hi_hi_hi, logical_lo_hi_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_hi_lo_hi = {logical_lo_hi_lo_hi_hi, logical_lo_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_lo_hi_lo = {logical_lo_hi_lo_hi, logical_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_hi_lo_lo_lo = {_logical_T_405, _logical_T_402}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_hi_lo_lo_hi = {_logical_T_411, _logical_T_408}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_hi_lo_lo = {logical_lo_hi_hi_lo_lo_hi, logical_lo_hi_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_hi_lo_hi_lo = {_logical_T_417, _logical_T_414}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_hi_lo_hi_hi = {_logical_T_423, _logical_T_420}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_hi_lo_hi = {logical_lo_hi_hi_lo_hi_hi, logical_lo_hi_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_hi_hi_lo = {logical_lo_hi_hi_lo_hi, logical_lo_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_hi_hi_lo_lo = {_logical_T_429, _logical_T_426}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_hi_hi_lo_hi = {_logical_T_435, _logical_T_432}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_hi_hi_lo = {logical_lo_hi_hi_hi_lo_hi, logical_lo_hi_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_lo_hi_hi_hi_hi_lo = {_logical_T_441, _logical_T_438}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_lo_hi_hi_hi_hi_hi = {_logical_T_447, _logical_T_444}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_lo_hi_hi_hi_hi = {logical_lo_hi_hi_hi_hi_hi, logical_lo_hi_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_lo_hi_hi_hi = {logical_lo_hi_hi_hi_hi, logical_lo_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_lo_hi_hi = {logical_lo_hi_hi_hi, logical_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [31:0] logical_lo_hi = {logical_lo_hi_hi, logical_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [63:0] logical_lo = {logical_lo_hi, logical_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_lo_lo_lo_lo = {_logical_T_453, _logical_T_450}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_lo_lo_lo_hi = {_logical_T_459, _logical_T_456}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_lo_lo_lo = {logical_hi_lo_lo_lo_lo_hi, logical_hi_lo_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_lo_lo_hi_lo = {_logical_T_465, _logical_T_462}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_lo_lo_hi_hi = {_logical_T_471, _logical_T_468}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_lo_lo_hi = {logical_hi_lo_lo_lo_hi_hi, logical_hi_lo_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_lo_lo_lo = {logical_hi_lo_lo_lo_hi, logical_hi_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_lo_hi_lo_lo = {_logical_T_477, _logical_T_474}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_lo_hi_lo_hi = {_logical_T_483, _logical_T_480}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_lo_hi_lo = {logical_hi_lo_lo_hi_lo_hi, logical_hi_lo_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_lo_hi_hi_lo = {_logical_T_489, _logical_T_486}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_lo_hi_hi_hi = {_logical_T_495, _logical_T_492}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_lo_hi_hi = {logical_hi_lo_lo_hi_hi_hi, logical_hi_lo_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_lo_lo_hi = {logical_hi_lo_lo_hi_hi, logical_hi_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_hi_lo_lo = {logical_hi_lo_lo_hi, logical_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_hi_lo_lo_lo = {_logical_T_501, _logical_T_498}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_hi_lo_lo_hi = {_logical_T_507, _logical_T_504}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_hi_lo_lo = {logical_hi_lo_hi_lo_lo_hi, logical_hi_lo_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_hi_lo_hi_lo = {_logical_T_513, _logical_T_510}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_hi_lo_hi_hi = {_logical_T_519, _logical_T_516}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_hi_lo_hi = {logical_hi_lo_hi_lo_hi_hi, logical_hi_lo_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_lo_hi_lo = {logical_hi_lo_hi_lo_hi, logical_hi_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_hi_hi_lo_lo = {_logical_T_525, _logical_T_522}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_hi_hi_lo_hi = {_logical_T_531, _logical_T_528}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_hi_hi_lo = {logical_hi_lo_hi_hi_lo_hi, logical_hi_lo_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_lo_hi_hi_hi_lo = {_logical_T_537, _logical_T_534}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_lo_hi_hi_hi_hi = {_logical_T_543, _logical_T_540}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_lo_hi_hi_hi = {logical_hi_lo_hi_hi_hi_hi, logical_hi_lo_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_lo_hi_hi = {logical_hi_lo_hi_hi_hi, logical_hi_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_hi_lo_hi = {logical_hi_lo_hi_hi, logical_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [31:0] logical_hi_lo = {logical_hi_lo_hi, logical_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_lo_lo_lo_lo = {_logical_T_549, _logical_T_546}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_lo_lo_lo_hi = {_logical_T_555, _logical_T_552}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_lo_lo_lo = {logical_hi_hi_lo_lo_lo_hi, logical_hi_hi_lo_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_lo_lo_hi_lo = {_logical_T_561, _logical_T_558}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_lo_lo_hi_hi = {_logical_T_567, _logical_T_564}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_lo_lo_hi = {logical_hi_hi_lo_lo_hi_hi, logical_hi_hi_lo_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_hi_lo_lo = {logical_hi_hi_lo_lo_hi, logical_hi_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_lo_hi_lo_lo = {_logical_T_573, _logical_T_570}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_lo_hi_lo_hi = {_logical_T_579, _logical_T_576}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_lo_hi_lo = {logical_hi_hi_lo_hi_lo_hi, logical_hi_hi_lo_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_lo_hi_hi_lo = {_logical_T_585, _logical_T_582}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_lo_hi_hi_hi = {_logical_T_591, _logical_T_588}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_lo_hi_hi = {logical_hi_hi_lo_hi_hi_hi, logical_hi_hi_lo_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_hi_lo_hi = {logical_hi_hi_lo_hi_hi, logical_hi_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_hi_hi_lo = {logical_hi_hi_lo_hi, logical_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_hi_lo_lo_lo = {_logical_T_597, _logical_T_594}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_hi_lo_lo_hi = {_logical_T_603, _logical_T_600}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_hi_lo_lo = {logical_hi_hi_hi_lo_lo_hi, logical_hi_hi_hi_lo_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_hi_lo_hi_lo = {_logical_T_609, _logical_T_606}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_hi_lo_hi_hi = {_logical_T_615, _logical_T_612}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_hi_lo_hi = {logical_hi_hi_hi_lo_hi_hi, logical_hi_hi_hi_lo_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_hi_hi_lo = {logical_hi_hi_hi_lo_hi, logical_hi_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_hi_hi_lo_lo = {_logical_T_621, _logical_T_618}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_hi_hi_lo_hi = {_logical_T_627, _logical_T_624}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_hi_hi_lo = {logical_hi_hi_hi_hi_lo_hi, logical_hi_hi_hi_hi_lo_lo}; // @[Atomics.scala:40:20]
wire [1:0] logical_hi_hi_hi_hi_hi_lo = {_logical_T_633, _logical_T_630}; // @[Atomics.scala:40:20, :41:8]
wire [1:0] logical_hi_hi_hi_hi_hi_hi = {_logical_T_639, _logical_T_636}; // @[Atomics.scala:40:20, :41:8]
wire [3:0] logical_hi_hi_hi_hi_hi = {logical_hi_hi_hi_hi_hi_hi, logical_hi_hi_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [7:0] logical_hi_hi_hi_hi = {logical_hi_hi_hi_hi_hi, logical_hi_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [15:0] logical_hi_hi_hi = {logical_hi_hi_hi_hi, logical_hi_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [31:0] logical_hi_hi = {logical_hi_hi_hi, logical_hi_hi_lo}; // @[Atomics.scala:40:20]
wire [63:0] logical_hi = {logical_hi_hi, logical_hi_lo}; // @[Atomics.scala:40:20]
wire [127:0] logical = {logical_hi, logical_lo}; // @[Atomics.scala:40:20]
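// The 128-bit 'logical' value assembled above is the bitwise result of the logical atomic: each bit
// indexes a 4-entry truth table (_GEN[_lut_T], presumably the AND/OR/XOR/SWAP table chosen from the
// A-channel param earlier in the module) with a pair of corresponding operand bits (Atomics.scala:39-41).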
wire [1:0] _select_T_1 = adder ? 2'h2 : {1'h0, _select_T}; // @[Atomics.scala:8:7, :10:14, :18:28, :48:{8,24}]
wire [1:0] _select_WIRE_2 = _select_T_1; // @[Atomics.scala:45:42, :48:8]
wire [7:0][1:0] _GEN_0 = {{2'h0}, {2'h0}, {2'h0}, {2'h0}, {2'h3}, {_select_WIRE_2}, {2'h1}, {2'h1}}; // @[Atomics.scala:45:{19,42}]
wire [1:0] select = io_write_0 ? 2'h1 : _GEN_0[io_a_opcode_0]; // @[Atomics.scala:8:7, :45:19]
wire [1:0] selects_0 = _selects_T ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_1 = _selects_T_1 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_2 = _selects_T_2 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_3 = _selects_T_3 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_4 = _selects_T_4 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_5 = _selects_T_5 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_6 = _selects_T_6 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_7 = _selects_T_7 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_8 = _selects_T_8 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_9 = _selects_T_9 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_10 = _selects_T_10 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_11 = _selects_T_11 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_12 = _selects_T_12 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_13 = _selects_T_13 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_14 = _selects_T_14 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
wire [1:0] selects_15 = _selects_T_15 ? select : 2'h0; // @[Atomics.scala:45:19, :57:{27,47}]
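// Byte-lane select encoding, taken from the _GEN_1/_GEN_2 packings further below: 0 keeps the
// io_data_in byte, 1 takes the io_a_data byte, 2 takes the arithmetic 'sum' byte, 3 takes the
// 'logical' byte. io_write forces 1 (plain write), and a lane whose _selects_T_* enable (presumably
// the byte mask bit) is low falls back to 2'h0, so the old io_data_in byte passes through unchanged.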
wire [7:0] _io_data_out_T = io_data_in_0[7:0]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_0 = _io_data_out_T; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_1 = io_a_data_0[7:0]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_1 = _io_data_out_T_1; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_2 = sum[7:0]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_2 = _io_data_out_T_2; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_3 = logical[7:0]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_3 = _io_data_out_T_3; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_4 = io_data_in_0[15:8]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_1_0 = _io_data_out_T_4; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_5 = io_a_data_0[15:8]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_1_1 = _io_data_out_T_5; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_6 = sum[15:8]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_1_2 = _io_data_out_T_6; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_7 = logical[15:8]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_1_3 = _io_data_out_T_7; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_8 = io_data_in_0[23:16]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_2_0 = _io_data_out_T_8; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_9 = io_a_data_0[23:16]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_2_1 = _io_data_out_T_9; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_10 = sum[23:16]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_2_2 = _io_data_out_T_10; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_11 = logical[23:16]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_2_3 = _io_data_out_T_11; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_12 = io_data_in_0[31:24]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_3_0 = _io_data_out_T_12; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_13 = io_a_data_0[31:24]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_3_1 = _io_data_out_T_13; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_14 = sum[31:24]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_3_2 = _io_data_out_T_14; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_15 = logical[31:24]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_3_3 = _io_data_out_T_15; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_16 = io_data_in_0[39:32]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_4_0 = _io_data_out_T_16; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_17 = io_a_data_0[39:32]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_4_1 = _io_data_out_T_17; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_18 = sum[39:32]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_4_2 = _io_data_out_T_18; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_19 = logical[39:32]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_4_3 = _io_data_out_T_19; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_20 = io_data_in_0[47:40]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_5_0 = _io_data_out_T_20; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_21 = io_a_data_0[47:40]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_5_1 = _io_data_out_T_21; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_22 = sum[47:40]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_5_2 = _io_data_out_T_22; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_23 = logical[47:40]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_5_3 = _io_data_out_T_23; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_24 = io_data_in_0[55:48]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_6_0 = _io_data_out_T_24; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_25 = io_a_data_0[55:48]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_6_1 = _io_data_out_T_25; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_26 = sum[55:48]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_6_2 = _io_data_out_T_26; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_27 = logical[55:48]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_6_3 = _io_data_out_T_27; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_28 = io_data_in_0[63:56]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_7_0 = _io_data_out_T_28; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_29 = io_a_data_0[63:56]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_7_1 = _io_data_out_T_29; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_30 = sum[63:56]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_7_2 = _io_data_out_T_30; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_31 = logical[63:56]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_7_3 = _io_data_out_T_31; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_32 = io_data_in_0[71:64]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_8_0 = _io_data_out_T_32; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_33 = io_a_data_0[71:64]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_8_1 = _io_data_out_T_33; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_34 = sum[71:64]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_8_2 = _io_data_out_T_34; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_35 = logical[71:64]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_8_3 = _io_data_out_T_35; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_36 = io_data_in_0[79:72]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_9_0 = _io_data_out_T_36; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_37 = io_a_data_0[79:72]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_9_1 = _io_data_out_T_37; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_38 = sum[79:72]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_9_2 = _io_data_out_T_38; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_39 = logical[79:72]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_9_3 = _io_data_out_T_39; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_40 = io_data_in_0[87:80]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_10_0 = _io_data_out_T_40; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_41 = io_a_data_0[87:80]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_10_1 = _io_data_out_T_41; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_42 = sum[87:80]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_10_2 = _io_data_out_T_42; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_43 = logical[87:80]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_10_3 = _io_data_out_T_43; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_44 = io_data_in_0[95:88]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_11_0 = _io_data_out_T_44; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_45 = io_a_data_0[95:88]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_11_1 = _io_data_out_T_45; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_46 = sum[95:88]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_11_2 = _io_data_out_T_46; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_47 = logical[95:88]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_11_3 = _io_data_out_T_47; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_48 = io_data_in_0[103:96]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_12_0 = _io_data_out_T_48; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_49 = io_a_data_0[103:96]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_12_1 = _io_data_out_T_49; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_50 = sum[103:96]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_12_2 = _io_data_out_T_50; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_51 = logical[103:96]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_12_3 = _io_data_out_T_51; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_52 = io_data_in_0[111:104]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_13_0 = _io_data_out_T_52; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_53 = io_a_data_0[111:104]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_13_1 = _io_data_out_T_53; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_54 = sum[111:104]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_13_2 = _io_data_out_T_54; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_55 = logical[111:104]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_13_3 = _io_data_out_T_55; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_56 = io_data_in_0[119:112]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_14_0 = _io_data_out_T_56; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_57 = io_a_data_0[119:112]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_14_1 = _io_data_out_T_57; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_58 = sum[119:112]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_14_2 = _io_data_out_T_58; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_59 = logical[119:112]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_14_3 = _io_data_out_T_59; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_60 = io_data_in_0[127:120]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_15_0 = _io_data_out_T_60; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_61 = io_a_data_0[127:120]; // @[Atomics.scala:8:7, :59:59]
wire [7:0] _io_data_out_WIRE_15_1 = _io_data_out_T_61; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_62 = sum[127:120]; // @[Atomics.scala:24:57, :59:59]
wire [7:0] _io_data_out_WIRE_15_2 = _io_data_out_T_62; // @[Atomics.scala:59:{12,59}]
wire [7:0] _io_data_out_T_63 = logical[127:120]; // @[Atomics.scala:40:20, :59:59]
wire [7:0] _io_data_out_WIRE_15_3 = _io_data_out_T_63; // @[Atomics.scala:59:{12,59}]
wire [3:0][7:0] _GEN_1 = {{_io_data_out_WIRE_1_3}, {_io_data_out_WIRE_1_2}, {_io_data_out_WIRE_1_1}, {_io_data_out_WIRE_1_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_2 = {{_io_data_out_WIRE_3}, {_io_data_out_WIRE_2}, {_io_data_out_WIRE_1}, {_io_data_out_WIRE_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_lo_lo_lo = {_GEN_1[selects_1], _GEN_2[selects_0]}; // @[Atomics.scala:57:47, :58:21]
wire [3:0][7:0] _GEN_3 = {{_io_data_out_WIRE_3_3}, {_io_data_out_WIRE_3_2}, {_io_data_out_WIRE_3_1}, {_io_data_out_WIRE_3_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_4 = {{_io_data_out_WIRE_2_3}, {_io_data_out_WIRE_2_2}, {_io_data_out_WIRE_2_1}, {_io_data_out_WIRE_2_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_lo_lo_hi = {_GEN_3[selects_3], _GEN_4[selects_2]}; // @[Atomics.scala:57:47, :58:21]
wire [31:0] io_data_out_lo_lo = {io_data_out_lo_lo_hi, io_data_out_lo_lo_lo}; // @[Atomics.scala:58:21]
wire [3:0][7:0] _GEN_5 = {{_io_data_out_WIRE_5_3}, {_io_data_out_WIRE_5_2}, {_io_data_out_WIRE_5_1}, {_io_data_out_WIRE_5_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_6 = {{_io_data_out_WIRE_4_3}, {_io_data_out_WIRE_4_2}, {_io_data_out_WIRE_4_1}, {_io_data_out_WIRE_4_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_lo_hi_lo = {_GEN_5[selects_5], _GEN_6[selects_4]}; // @[Atomics.scala:57:47, :58:21]
wire [3:0][7:0] _GEN_7 = {{_io_data_out_WIRE_7_3}, {_io_data_out_WIRE_7_2}, {_io_data_out_WIRE_7_1}, {_io_data_out_WIRE_7_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_8 = {{_io_data_out_WIRE_6_3}, {_io_data_out_WIRE_6_2}, {_io_data_out_WIRE_6_1}, {_io_data_out_WIRE_6_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_lo_hi_hi = {_GEN_7[selects_7], _GEN_8[selects_6]}; // @[Atomics.scala:57:47, :58:21]
wire [31:0] io_data_out_lo_hi = {io_data_out_lo_hi_hi, io_data_out_lo_hi_lo}; // @[Atomics.scala:58:21]
wire [63:0] io_data_out_lo = {io_data_out_lo_hi, io_data_out_lo_lo}; // @[Atomics.scala:58:21]
wire [3:0][7:0] _GEN_9 = {{_io_data_out_WIRE_9_3}, {_io_data_out_WIRE_9_2}, {_io_data_out_WIRE_9_1}, {_io_data_out_WIRE_9_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_10 = {{_io_data_out_WIRE_8_3}, {_io_data_out_WIRE_8_2}, {_io_data_out_WIRE_8_1}, {_io_data_out_WIRE_8_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_hi_lo_lo = {_GEN_9[selects_9], _GEN_10[selects_8]}; // @[Atomics.scala:57:47, :58:21]
wire [3:0][7:0] _GEN_11 = {{_io_data_out_WIRE_11_3}, {_io_data_out_WIRE_11_2}, {_io_data_out_WIRE_11_1}, {_io_data_out_WIRE_11_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_12 = {{_io_data_out_WIRE_10_3}, {_io_data_out_WIRE_10_2}, {_io_data_out_WIRE_10_1}, {_io_data_out_WIRE_10_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_hi_lo_hi = {_GEN_11[selects_11], _GEN_12[selects_10]}; // @[Atomics.scala:57:47, :58:21]
wire [31:0] io_data_out_hi_lo = {io_data_out_hi_lo_hi, io_data_out_hi_lo_lo}; // @[Atomics.scala:58:21]
wire [3:0][7:0] _GEN_13 = {{_io_data_out_WIRE_13_3}, {_io_data_out_WIRE_13_2}, {_io_data_out_WIRE_13_1}, {_io_data_out_WIRE_13_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_14 = {{_io_data_out_WIRE_12_3}, {_io_data_out_WIRE_12_2}, {_io_data_out_WIRE_12_1}, {_io_data_out_WIRE_12_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_hi_hi_lo = {_GEN_13[selects_13], _GEN_14[selects_12]}; // @[Atomics.scala:57:47, :58:21]
wire [3:0][7:0] _GEN_15 = {{_io_data_out_WIRE_15_3}, {_io_data_out_WIRE_15_2}, {_io_data_out_WIRE_15_1}, {_io_data_out_WIRE_15_0}}; // @[Atomics.scala:58:21, :59:12]
wire [3:0][7:0] _GEN_16 = {{_io_data_out_WIRE_14_3}, {_io_data_out_WIRE_14_2}, {_io_data_out_WIRE_14_1}, {_io_data_out_WIRE_14_0}}; // @[Atomics.scala:58:21, :59:12]
wire [15:0] io_data_out_hi_hi_hi = {_GEN_15[selects_15], _GEN_16[selects_14]}; // @[Atomics.scala:57:47, :58:21]
wire [31:0] io_data_out_hi_hi = {io_data_out_hi_hi_hi, io_data_out_hi_hi_lo}; // @[Atomics.scala:58:21]
wire [63:0] io_data_out_hi = {io_data_out_hi_hi, io_data_out_hi_lo}; // @[Atomics.scala:58:21]
assign _io_data_out_T_64 = {io_data_out_hi, io_data_out_lo}; // @[Atomics.scala:58:21]
assign io_data_out_0 = _io_data_out_T_64; // @[Atomics.scala:8:7, :58:21]
assign io_data_out = io_data_out_0; // @[Atomics.scala:8:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
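// Worked example (an illustrative sketch, not part of the original file), assuming
// edge.bundle.sizeBits = 4, so a_size_bus_size = 5 and log_a_size_bus_size = 3:
// each source ID then owns an 8-bit slot of 'inflight_sizes'. A size-3 request from
// source 2 is recorded and later recovered as
//   a_sizes_set_interm = (3.U << 1) | 1.U                          // = 7; the low bit marks the slot as valid
//   a_sizes_set        = a_sizes_set_interm << (2.U << 3.U)        // shifted into bits [23:16]
//   a_size_lookup      = ((inflight_sizes >> 16) & 0xff.U) >> 1.U  // drops the valid bit, yielding 3
// The opcode tracking below follows the same +1 / shift-by-source scheme.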
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
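// Usage sketch (illustrative only, not part of the original file): combine several
// ready/valid conditions and drive each interface's handshake from all of the
// *other* conditions; 'in', 'meta' and 'out' are hypothetical Decoupled ports.
//   val helper = DecoupledHelper(in.valid, meta.valid, out.ready)
//   out.valid  := helper.fire(out.ready)  // in.valid && meta.valid
//   in.ready   := helper.fire(in.valid)   // meta.valid && out.ready
//   meta.ready := helper.fire(meta.valid) // in.valid && out.ready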
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
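// Usage sketch (illustrative only, not part of the original file): select a pair of
// values keyed by an opcode, falling back to the default when no key matches;
// 'opcode' is a hypothetical UInt signal.
//   val (isWrite, accessBytes) = MuxTLookup(opcode, (false.B, 0.U), Seq(
//     0.U -> (false.B, 4.U),
//     1.U -> (true.B, 4.U)))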
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
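// Usage sketch (illustrative only, not part of the original file): derive the byte
// mask of an access from its low address bits and log2(size); 'addr' and 'lgSize'
// are hypothetical signals.
//   val mask = MaskGen(addr(2, 0), lgSize, beatBytes = 8)
//   // e.g. addr(2,0) = 4 with lgSize = 2 selects bytes 4 through 7, i.e. mask = 8'hF0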
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
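// Usage sketch (illustrative only, not part of the original file): read an integer
// plusarg supplied on the simulator command line as +max_cycles=5000; the name and
// docstring below are hypothetical.
//   val maxCycles = PlusArg("max_cycles", default = 0, docstring = "Stop after this many cycles. Off if 0.")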
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
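// Usage sketch (illustrative only, not part of the original file): kill simulation
// once a local cycle counter exceeds +watchdog_cycles=N (never fires when N = 0);
// the plusarg name is hypothetical.
//   val cycles = RegInit(0.U(32.W))
//   cycles := cycles + 1.U
//   PlusArg.timeout("watchdog_cycles", docstring = "Watchdog limit in cycles. Off if 0.")(cycles)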
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
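/** A small elaboration-time sketch (illustrative, not part of the original sources): once
  * the PlusArg calls above have registered their artefacts, the collected help text and
  * plusarg name table can be emitted for the emulator driver, e.g. written into a
  * generated C header alongside the Verilog.
  */
object PlusArgHeaderExample {
  def cHeader: String = PlusArgArtefacts.serialize_cHeader()
}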
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
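  // A minimal usage sketch (hypothetical module, illustrative only): addWrap/subWrap give
  // modular pointer arithmetic for a non-power-of-2 depth, and padTo zero-extends a value.
  class UIntHelperExample extends Module {
    val io = IO(new Bundle {
      val ptr  = Input(UInt(3.W))   // assumed by the caller to stay below 5
      val next = Output(UInt(3.W))
      val prev = Output(UInt(3.W))
      val wide = Output(UInt(8.W))
    })
    io.next := io.ptr.addWrap(1.U, 5) // (ptr + 1) % 5, valid because ptr < 5 and 1 < 5
    io.prev := io.ptr.subWrap(1.U, 5) // (ptr - 1) % 5
    io.wide := io.ptr.padTo(8)        // zero-extend to 8 bits
  }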
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
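  // A small elaboration-time sketch (illustrative values, not from the original sources):
  // unlike Seq.groupBy, the keys come back in first-appearance order, so the generated
  // hardware does not depend on hash-map iteration order.
  private def groupByIntoSeqExample(): Unit = {
    val grouped = groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)
    require(grouped == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4)))
  }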
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
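// A small elaboration-time sketch (illustrative values, not from the original sources):
// IdRange is half-open, so [0,4) and [4,8) do not overlap, while [0,8) contains both.
object IdRangeExample {
  require(!(IdRange(0, 4) overlaps IdRange(4, 8)))
  require(IdRange(0, 8) contains IdRange(4, 8))
  require(IdRange(2, 6).shift(4) == IdRange(6, 10))
}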
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
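// A small elaboration-time sketch (illustrative values, not from the original sources):
// a TransferSizes covers an inclusive range of power-of-2 sizes in bytes.
object TransferSizesExample {
  private val a = TransferSizes(4, 64)
  require(a.contains(16) && !a.contains(24) && !a.contains(128))
  require((a intersect TransferSizes(32, 256)) == TransferSizes(32, 64))
  require((a mincover TransferSizes(1, 2)) == TransferSizes(1, 64))
}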
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
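// A small elaboration-time sketch (illustrative values, not from the original sources)
// showing how base/mask address matching behaves.
object AddressSetExample {
  // base=0x1000, mask=0xf0f matches 0x1000-0x100f, 0x1100-0x110f, ..., 0x1f00-0x1f0f
  private val striped = AddressSet(0x1000, 0xf0f)
  require(striped.contains(BigInt(0x1105)))
  require(!striped.contains(BigInt(0x1010)))
  // Splitting a misaligned region (base 0x3000, size 0x2000) into aligned power-of-2 sets
  require(AddressSet.misaligned(0x3000, 0x2000) ==
    Seq(AddressSet(0x3000, 0xfff), AddressSet(0x4000, 0xfff)))
}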
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
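// A minimal usage sketch (hypothetical module, not from the original sources): applying a
// BufferParams to a DecoupledIO inserts a Queue of the requested depth; BufferParams.none
// would pass the channel through untouched.
class BufferParamsExample extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(DecoupledIO(UInt(8.W)))
    val deq = DecoupledIO(UInt(8.W))
  })
  io.deq <> BufferParams.default(io.enq) // two-entry Queue (flow=false, pipe=false)
}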
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
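// A usage sketch (hypothetical client, not from the original sources): inside a
// LazyModuleImp, the TLEdgeOut obtained from a client node builds legal A-channel
// requests, e.g. a single-beat 4-byte Get. It assumes the node is connected to a
// TL manager elsewhere in the design; unused channels are tied off.
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp}
class TLGetExampleClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "getExample", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 2.U)
    tl.a.valid := legal    // sketch only: a real client would also sequence on a.ready
    tl.a.bits := getBits
    tl.d.ready := true.B   // sink the AccessAckData response
    tl.b.ready := true.B   // this client never uses B/C/E
    tl.c.valid := false.B
    tl.c.bits := DontCare
    tl.e.valid := false.B
    tl.e.bits := DontCare
  }
}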
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_64( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [9:0] inflight; // @[Monitor.scala:614:27]
reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [39:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [9:0] inflight_1; // @[Monitor.scala:726:35]
reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToIN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of
the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.log2Up
import scala.math._
import consts._
class RecFNToIN(expWidth: Int, sigWidth: Int, intWidth: Int) extends chisel3.Module
{
override def desiredName = s"RecFNToIN_e${expWidth}_s${sigWidth}_i${intWidth}"
val io = IO(new Bundle {
val in = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val signedOut = Input(Bool())
val out = Output(Bits(intWidth.W))
val intExceptionFlags = Output(Bits(3.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(expWidth, sigWidth, io.in)
val magGeOne = rawIn.sExp(expWidth)
val posExp = rawIn.sExp(expWidth - 1, 0)
val magJustBelowOne = !magGeOne && posExp.andR
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
/*------------------------------------------------------------------------
| Assuming the input floating-point value is not a NaN, its magnitude is
| at least 1, and it is not obviously so large as to lead to overflow,
| convert its significand to fixed-point (i.e., with the binary point in a
| fixed location). For a non-NaN input with a magnitude less than 1, this
| expression contrives to ensure that the integer bits of 'alignedSig'
| will all be zeros.
*------------------------------------------------------------------------*/
val shiftedSig =
(magGeOne ## rawIn.sig(sigWidth - 2, 0))<<
Mux(magGeOne,
rawIn.sExp(min(expWidth - 2, log2Up(intWidth) - 1), 0),
0.U
)
val alignedSig =
(shiftedSig>>(sigWidth - 2)) ## shiftedSig(sigWidth - 3, 0).orR
val unroundedInt = 0.U(intWidth.W) | alignedSig>>2
val common_inexact = Mux(magGeOne, alignedSig(1, 0).orR, !rawIn.isZero)
val roundIncr_near_even =
(magGeOne && (alignedSig(2, 1).andR || alignedSig(1, 0).andR)) ||
(magJustBelowOne && alignedSig(1, 0).orR)
val roundIncr_near_maxMag = (magGeOne && alignedSig(1)) || magJustBelowOne
val roundIncr =
(roundingMode_near_even && roundIncr_near_even ) ||
(roundingMode_near_maxMag && roundIncr_near_maxMag) ||
((roundingMode_min || roundingMode_odd) &&
(rawIn.sign && common_inexact)) ||
(roundingMode_max && (!rawIn.sign && common_inexact))
val complUnroundedInt = Mux(rawIn.sign, ~unroundedInt, unroundedInt)
val roundedInt =
Mux(roundIncr ^ rawIn.sign,
complUnroundedInt + 1.U,
complUnroundedInt
) | (roundingMode_odd && common_inexact)
val magGeOne_atOverflowEdge = (posExp === (intWidth - 1).U)
//*** CHANGE TO TAKE BITS FROM THE ORIGINAL 'rawIn.sig' INSTEAD OF FROM
//*** 'unroundedInt'?:
val roundCarryBut2 = unroundedInt(intWidth - 3, 0).andR && roundIncr
val common_overflow =
Mux(magGeOne,
(posExp >= intWidth.U) ||
Mux(io.signedOut,
Mux(rawIn.sign,
magGeOne_atOverflowEdge &&
(unroundedInt(intWidth - 2, 0).orR || roundIncr),
magGeOne_atOverflowEdge ||
((posExp === (intWidth - 2).U) && roundCarryBut2)
),
rawIn.sign ||
(magGeOne_atOverflowEdge &&
unroundedInt(intWidth - 2) && roundCarryBut2)
),
!io.signedOut && rawIn.sign && roundIncr
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val invalidExc = rawIn.isNaN || rawIn.isInf
val overflow = !invalidExc && common_overflow
val inexact = !invalidExc && !common_overflow && common_inexact
val excSign = !rawIn.isNaN && rawIn.sign
val excOut =
Mux((io.signedOut === excSign),
(BigInt(1)<<(intWidth - 1)).U,
0.U
) |
Mux(!excSign, ((BigInt(1)<<(intWidth - 1)) - 1).U, 0.U)
io.out := Mux(invalidExc || common_overflow, excOut, roundedInt)
io.intExceptionFlags := invalidExc ## overflow ## inexact
}
| module RecFNToIN_e11_s53_i32( // @[RecFNToIN.scala:46:7]
input [64:0] io_in, // @[RecFNToIN.scala:49:16]
input [2:0] io_roundingMode, // @[RecFNToIN.scala:49:16]
input io_signedOut, // @[RecFNToIN.scala:49:16]
output [2:0] io_intExceptionFlags // @[RecFNToIN.scala:49:16]
);
wire magJustBelowOne = ~(io_in[63]) & (&(io_in[62:52])); // @[RecFNToIN.scala:61:30, :62:28, :63:{27,37,47}]
wire [83:0] shiftedSig = {31'h0, io_in[63], io_in[51:0]} << (io_in[63] ? io_in[56:52] : 5'h0); // @[rawFloatFromRecFN.scala:61:49]
wire [1:0] _roundIncr_near_even_T_6 = {shiftedSig[51], |(shiftedSig[50:0])}; // @[RecFNToIN.scala:83:49, :89:{51,69}, :92:50]
wire common_inexact = io_in[63] ? (|_roundIncr_near_even_T_6) : (|(io_in[63:61])); // @[rawFloatFromRecFN.scala:51:21, :52:{28,53}]
wire roundIncr = io_roundingMode == 3'h0 & (io_in[63] & ((&(shiftedSig[52:51])) | (&_roundIncr_near_even_T_6)) | magJustBelowOne & (|_roundIncr_near_even_T_6)) | io_roundingMode == 3'h4 & (io_in[63] & shiftedSig[51] | magJustBelowOne) | (io_roundingMode == 3'h2 | io_roundingMode == 3'h6) & io_in[64] & common_inexact | io_roundingMode == 3'h3 & ~(io_in[64]) & common_inexact; // @[rawFloatFromRecFN.scala:52:53, :59:25]
wire magGeOne_atOverflowEdge = io_in[62:52] == 11'h1F; // @[RecFNToIN.scala:62:28, :110:43]
wire roundCarryBut2 = (&(shiftedSig[81:52])) & roundIncr; // @[RecFNToIN.scala:83:49, :90:52, :98:61, :99:61, :101:46, :113:{38,56,61}]
wire common_overflow = io_in[63] ? (|(io_in[62:57])) | (io_signedOut ? (io_in[64] ? magGeOne_atOverflowEdge & ((|(shiftedSig[82:52])) | roundIncr) : magGeOne_atOverflowEdge | io_in[62:52] == 11'h1E & roundCarryBut2) : io_in[64] | magGeOne_atOverflowEdge & shiftedSig[82] & roundCarryBut2) : ~io_signedOut & io_in[64] & roundIncr; // @[rawFloatFromRecFN.scala:59:25]
wire invalidExc = (&(io_in[63:62])) & io_in[61] | (&(io_in[63:62])) & ~(io_in[61]); // @[rawFloatFromRecFN.scala:51:21, :53:{28,53}, :56:{33,41}, :57:{33,36}]
assign io_intExceptionFlags = {invalidExc, ~invalidExc & common_overflow, ~invalidExc & ~common_overflow & common_inexact}; // @[RecFNToIN.scala:46:7, :92:29, :115:12, :133:34, :134:{20,32}, :135:{32,35,52}, :146:52]
endmodule |
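Aside (editor's sketch, not part of the hardfloat sources): RecFNToIN's round-to-nearest-even decision reduces to the integer LSB, the first discarded ("round") bit, and the OR of everything below it (the sticky bit), which correspond to alignedSig(2), alignedSig(1) and alignedSig(0) above. A plain-Scala reference model of just that decision:
object RoundToIntModel {
  // Round a non-negative value given as its truncated integer part plus the
  // round and sticky bits, using IEEE round-to-nearest-even.
  def roundNearEven(intPart: BigInt, roundBit: Boolean, sticky: Boolean): BigInt =
    if (roundBit && (sticky || intPart.testBit(0))) intPart + 1 else intPart
  def main(args: Array[String]): Unit = {
    println(roundNearEven(BigInt(2), roundBit = true, sticky = false)) // 2.5  -> 2 (ties to even)
    println(roundNearEven(BigInt(3), roundBit = true, sticky = false)) // 3.5  -> 4 (ties to even)
    println(roundNearEven(BigInt(2), roundBit = true, sticky = true))  // 2.5+ -> 3
  }
}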
Generate the Verilog code corresponding to the following Chisel files.
File SinkC.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class SinkCResponse(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val last = Bool()
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val source = UInt(params.inner.bundle.sourceBits.W)
val param = UInt(3.W)
val data = Bool()
}
class PutBufferCEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val data = UInt(params.inner.bundle.dataBits.W)
val corrupt = Bool()
}
class SinkC(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Decoupled(new FullRequest(params)) // Release
val resp = Valid(new SinkCResponse(params)) // ProbeAck
val c = Flipped(Decoupled(new TLBundleC(params.inner.bundle)))
// Find 'way' via MSHR CAM lookup
val set = UInt(params.setBits.W)
val way = Flipped(UInt(params.wayBits.W))
// ProbeAck write-back
val bs_adr = Decoupled(new BankedStoreInnerAddress(params))
val bs_dat = new BankedStoreInnerPoison(params)
// SourceD sideband
val rel_pop = Flipped(Decoupled(new PutBufferPop(params)))
val rel_beat = new PutBufferCEntry(params)
})
if (params.firstLevel) {
// Tie off unused ports
io.req.valid := false.B
io.req.bits := DontCare
io.resp.valid := false.B
io.resp.bits := DontCare
io.c.ready := true.B
io.set := 0.U
io.bs_adr.valid := false.B
io.bs_adr.bits := DontCare
io.bs_dat := DontCare
io.rel_pop.ready := true.B
io.rel_beat := DontCare
} else {
// No restrictions on the type of buffer
val c = params.micro.innerBuf.c(io.c)
val (tag, set, offset) = params.parseAddress(c.bits.address)
val (first, last, _, beat) = params.inner.count(c)
val hasData = params.inner.hasData(c.bits)
val raw_resp = c.bits.opcode === TLMessages.ProbeAck || c.bits.opcode === TLMessages.ProbeAckData
val resp = Mux(c.valid, raw_resp, RegEnable(raw_resp, c.valid))
// Handling of C is broken into two cases:
// ProbeAck
// if hasData, must be written to BankedStore
// if last beat, trigger resp
// Release
// if first beat, trigger req
// if hasData, go to putBuffer
// if hasData && first beat, must claim a list
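    //  (Descriptive note: 'resp' below distinguishes the two cases; the
    //   ProbeAck path drives 'bs_adr'/'io.resp', while the Release path
    //   drives 'io.req' and stores its data beats in 'putbuffer'.)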
assert (!(c.valid && c.bits.corrupt), "Data poisoning unavailable")
io.set := Mux(c.valid, set, RegEnable(set, c.valid)) // finds us the way
// Cut path from inner C to the BankedStore SRAM setup
    // ... this makes it easier to lay out the L2 data banks far away
val bs_adr = Wire(chiselTypeOf(io.bs_adr))
io.bs_adr <> Queue(bs_adr, 1, pipe=true)
io.bs_dat.data := RegEnable(c.bits.data, bs_adr.fire)
bs_adr.valid := resp && (!first || (c.valid && hasData))
bs_adr.bits.noop := !c.valid
bs_adr.bits.way := io.way
bs_adr.bits.set := io.set
bs_adr.bits.beat := Mux(c.valid, beat, RegEnable(beat + bs_adr.ready.asUInt, c.valid))
bs_adr.bits.mask := ~0.U(params.innerMaskBits.W)
params.ccover(bs_adr.valid && !bs_adr.ready, "SINKC_SRAM_STALL", "Data SRAM busy")
io.resp.valid := resp && c.valid && (first || last) && (!hasData || bs_adr.ready)
io.resp.bits.last := last
io.resp.bits.set := set
io.resp.bits.tag := tag
io.resp.bits.source := c.bits.source
io.resp.bits.param := c.bits.param
io.resp.bits.data := hasData
val putbuffer = Module(new ListBuffer(ListBufferParameters(new PutBufferCEntry(params), params.relLists, params.relBeats, false)))
val lists = RegInit(0.U(params.relLists.W))
val lists_set = WireInit(init = 0.U(params.relLists.W))
val lists_clr = WireInit(init = 0.U(params.relLists.W))
lists := (lists | lists_set) & ~lists_clr
val free = !lists.andR
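    // Descriptive note: the next line picks the lowest *clear* bit of 'lists'
    // as a one-hot value: leftOR(~lists) sets every bit at or above the lowest
    // free slot, so inverting its shifted copy leaves a mask of bits at or
    // below that slot, and ANDing with ~lists keeps exactly that one free bit.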
val freeOH = ~(leftOR(~lists) << 1) & ~lists
val freeIdx = OHToUInt(freeOH)
val req_block = first && !io.req.ready
val buf_block = hasData && !putbuffer.io.push.ready
val set_block = hasData && first && !free
params.ccover(c.valid && !raw_resp && req_block, "SINKC_REQ_STALL", "No MSHR available to sink request")
params.ccover(c.valid && !raw_resp && buf_block, "SINKC_BUF_STALL", "No space in putbuffer for beat")
params.ccover(c.valid && !raw_resp && set_block, "SINKC_SET_STALL", "No space in putbuffer for request")
c.ready := Mux(raw_resp, !hasData || bs_adr.ready, !req_block && !buf_block && !set_block)
io.req.valid := !resp && c.valid && first && !buf_block && !set_block
putbuffer.io.push.valid := !resp && c.valid && hasData && !req_block && !set_block
when (!resp && c.valid && first && hasData && !req_block && !buf_block) { lists_set := freeOH }
val put = Mux(first, freeIdx, RegEnable(freeIdx, first))
io.req.bits.prio := VecInit(4.U(3.W).asBools)
io.req.bits.control:= false.B
io.req.bits.opcode := c.bits.opcode
io.req.bits.param := c.bits.param
io.req.bits.size := c.bits.size
io.req.bits.source := c.bits.source
io.req.bits.offset := offset
io.req.bits.set := set
io.req.bits.tag := tag
io.req.bits.put := put
putbuffer.io.push.bits.index := put
putbuffer.io.push.bits.data.data := c.bits.data
putbuffer.io.push.bits.data.corrupt := c.bits.corrupt
// Grant access to pop the data
putbuffer.io.pop.bits := io.rel_pop.bits.index
putbuffer.io.pop.valid := io.rel_pop.fire
io.rel_pop.ready := putbuffer.io.valid(io.rel_pop.bits.index(log2Ceil(params.relLists)-1,0))
io.rel_beat := putbuffer.io.data
when (io.rel_pop.fire && io.rel_pop.bits.last) {
lists_clr := UIntToOH(io.rel_pop.bits.index, params.relLists)
}
}
}
| module SinkC( // @[SinkC.scala:41:7]
input clock, // @[SinkC.scala:41:7]
input reset, // @[SinkC.scala:41:7]
input io_req_ready, // @[SinkC.scala:43:14]
input [2:0] io_way, // @[SinkC.scala:43:14]
input io_rel_pop_valid, // @[SinkC.scala:43:14]
input [5:0] io_rel_pop_bits_index, // @[SinkC.scala:43:14]
input io_rel_pop_bits_last // @[SinkC.scala:43:14]
);
wire io_req_ready_0 = io_req_ready; // @[SinkC.scala:41:7]
wire [2:0] io_way_0 = io_way; // @[SinkC.scala:41:7]
wire io_rel_pop_valid_0 = io_rel_pop_valid; // @[SinkC.scala:41:7]
wire [5:0] io_rel_pop_bits_index_0 = io_rel_pop_bits_index; // @[SinkC.scala:41:7]
wire io_rel_pop_bits_last_0 = io_rel_pop_bits_last; // @[SinkC.scala:41:7]
wire [63:0] io_c_bits_data = 64'h0; // @[SinkC.scala:41:7]
wire [63:0] io_bs_dat_data = 64'h0; // @[SinkC.scala:41:7]
wire [63:0] io_rel_beat_data = 64'h0; // @[SinkC.scala:41:7]
wire [31:0] io_c_bits_address = 32'h0; // @[SinkC.scala:41:7, :43:14]
wire io_c_ready = 1'h1; // @[SinkC.scala:41:7]
wire io_bs_adr_ready = 1'h1; // @[SinkC.scala:41:7]
wire io_rel_pop_ready = 1'h1; // @[SinkC.scala:41:7]
wire [9:0] io_req_bits_set = 10'h0; // @[SinkC.scala:41:7]
wire [9:0] io_resp_bits_set = 10'h0; // @[SinkC.scala:41:7]
wire [9:0] io_set = 10'h0; // @[SinkC.scala:41:7]
wire [9:0] io_bs_adr_bits_set = 10'h0; // @[SinkC.scala:41:7]
wire [12:0] io_req_bits_tag = 13'h0; // @[SinkC.scala:41:7]
wire [12:0] io_resp_bits_tag = 13'h0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_source = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_offset = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_put = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_resp_bits_source = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_c_bits_source = 6'h0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_opcode = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_param = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_size = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_resp_bits_param = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_opcode = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_param = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_size = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_bs_adr_bits_way = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_bs_adr_bits_beat = 3'h0; // @[SinkC.scala:41:7]
wire io_req_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_0 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_1 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_2 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_control = 1'h0; // @[SinkC.scala:41:7]
wire io_resp_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_resp_bits_last = 1'h0; // @[SinkC.scala:41:7]
wire io_resp_bits_data = 1'h0; // @[SinkC.scala:41:7]
wire io_c_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_c_bits_corrupt = 1'h0; // @[SinkC.scala:41:7]
wire io_bs_adr_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_noop = 1'h0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_mask = 1'h0; // @[SinkC.scala:41:7]
wire io_rel_beat_corrupt = 1'h0; // @[SinkC.scala:41:7]
endmodule |
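Aside (editor's sketch, not part of the SiFive sources): the 'freeOH' computation in SinkC above picks the lowest free put-buffer list as a one-hot value. The plain-Scala model below reproduces the bit trick in software; 'leftOR' here is a stand-in for the rocket-chip utility of the same name.
object FreeOHModel {
  // Software stand-in for freechips.rocketchip.util.leftOR: propagate every
  // set bit of 'x' towards the MSB, within 'width' bits.
  def leftOR(x: Int, width: Int): Int = {
    var acc = x & ((1 << width) - 1)
    var shift = 1
    while (shift < width) { acc |= acc << shift; shift <<= 1 }
    acc & ((1 << width) - 1)
  }
  // One-hot of the lowest *clear* bit of 'lists', mirroring SinkC's
  //   val freeOH = ~(leftOR(~lists) << 1) & ~lists
  def freeOH(lists: Int, width: Int): Int = {
    val mask = (1 << width) - 1
    ~(leftOR(~lists & mask, width) << 1) & ~lists & mask
  }
  def main(args: Array[String]): Unit = {
    println(freeOH(0x6, 4).toBinaryString) // lists = 0110 -> "1"    (slot 0 free)
    println(freeOH(0x7, 4).toBinaryString) // lists = 0111 -> "1000" (slot 3 free)
  }
}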
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
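        // Descriptive note: with equal exponent widths and a wider (or equal)
        // target significand, the conversion is exact -- the shift just
        // left-pads the fraction with zeros -- so the only possible exception
        // is the invalid flag raised for a signaling NaN input.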
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
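/*----------------------------------------------------------------------------
| (Descriptive note.)  In the recoded format the top bits of the exponent act
| as a tag: all-zero top three bits encode zero, while '11' in the top two
| bits marks a special value, with the next bit distinguishing NaN from
| infinity.  The decoding below follows exactly that layout.
*----------------------------------------------------------------------------*/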
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module RecFNToRecFN_42( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
 * a, b, d, and the control signals are broadcast across the entire array and are passed through to the Tile's outputs
 * @param inputType The data type of the 'a' inputs to each PE
 * @param rows Number of rows of PEs in the Tile
 * @param columns Number of columns of PEs in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
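  // (Descriptive note: each foldLeft below threads a signal through one row or
  //  column of PEs -- the accumulator starts as the Tile-level input, each PE
  //  consumes it and returns its pass-through output, which becomes the input
  //  of the next PE in the chain.)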
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
  // Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_3( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [31:0] io_in_a_0_bits, // @[Tile.scala:17:14]
input [31:0] io_in_b_0_bits, // @[Tile.scala:17:14]
input [31:0] io_in_d_0_bits, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [3:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [31:0] io_out_c_0_bits, // @[Tile.scala:17:14]
output [31:0] io_out_b_0_bits, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [3:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [31:0] io_in_a_0_bits_0 = io_in_a_0_bits; // @[Tile.scala:16:7]
wire [31:0] io_in_b_0_bits_0 = io_in_b_0_bits; // @[Tile.scala:16:7]
wire [31:0] io_in_d_0_bits_0 = io_in_d_0_bits; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [3:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [31:0] io_out_a_0_bits; // @[Tile.scala:16:7]
wire [31:0] io_out_c_0_bits_0; // @[Tile.scala:16:7]
wire [31:0] io_out_b_0_bits_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [3:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_19 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a_bits (io_in_a_0_bits_0), // @[Tile.scala:16:7]
.io_in_b_bits (io_in_b_0_bits_0), // @[Tile.scala:16:7]
.io_in_d_bits (io_in_d_0_bits_0), // @[Tile.scala:16:7]
.io_out_a_bits (io_out_a_0_bits),
.io_out_b_bits (io_out_b_0_bits_0),
.io_out_c_bits (io_out_c_0_bits_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_c_0_bits = io_out_c_0_bits_0; // @[Tile.scala:16:7]
assign io_out_b_0_bits = io_out_b_0_bits_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
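Aside (editor's sketch, not part of the gemmini sources): the Tile wiring above relies on foldLeft to daisy-chain each broadcast signal through a row or column of PEs. A minimal plain-Scala analogue of that chaining pattern:
object FoldChainModel {
  // Each "PE" here forwards a transformed copy of what it receives, just as
  // each gemmini PE drives its out_* ports from its in_* ports.
  final case class PE(id: Int) { def pass(x: Int): Int = x + 1 }
  def main(args: Array[String]): Unit = {
    val row = Seq(PE(0), PE(1), PE(2))
    // The fold's accumulator is the value handed to the next PE in the chain.
    val out = row.foldLeft(100) { case (in, pe) => pe.pass(in) }
    println(out) // 103
  }
}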
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
            // extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
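            // (Descriptive note: the recursion splits on the input's MSB and
            // assembles the result from lowMask instances over the remaining
            // bits with adjusted bounds, so each level only instantiates
            // shifters of half the width.)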
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
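    // Descriptive note (editor's gloss): in the recoded format the top bits of
    // the output exponent act as a tag, so 'outNaNExp' (top bits 111) and
    // 'outInfExp' (top bits 110) are the exponent fields emitted for NaN and
    // infinity, while 'outMinNormExp'/'outMinNonzeroExp' bound the exponents
    // of normal and nonzero subnormal results.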
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
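        // Descriptive note: 'roundMask' marks every significand position that
        // rounding will discard, 'roundPosMask' isolates the single highest
        // discarded ("round") bit, 'roundPosBit' is that bit's value, and
        // 'anyRoundExtra' ORs everything below it (the sticky bit).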
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_54( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61]
wire unboundedRange_roundPosBit = doShiftSigDown1 ? _unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27]
wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49]
wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = doShiftSigDown1 ? _common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34]
wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38]
wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45]
wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}]
wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60]
wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
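The module just generated is dominated by the round-to-nearest-even bookkeeping of RoundAnyRawFNToRecFN: roundPosBit is the first bit dropped, anyRoundExtra ORs together everything below it, the kept bits are incremented when rounding up, and on an exact tie the low bits are masked off so the result lands on an even significand. The sketch below (written for this note, not taken from the hardfloat sources; module and signal names are invented) shows the same rounding decision applied to plain integer truncation in Chisel:
import chisel3._
import chisel3.util._

// Drop the low k bits of io.in with IEEE round-to-nearest-even.
// guard plays the role of roundPosBit, sticky the role of anyRoundExtra.
class RoundNearestEven(w: Int, k: Int) extends Module {
  require(w > k && k >= 1)
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt((w - k + 1).W)) // one extra bit for the rounding carry
  })
  val kept    = io.in(w - 1, k)                              // surviving bits
  val guard   = io.in(k - 1)                                 // first dropped bit
  val sticky  = if (k > 1) io.in(k - 2, 0).orR else false.B  // any bit below guard
  val roundUp = guard && (sticky || kept(0))                 // ties go to the even value
  io.out := Cat(0.U(1.W), kept) + roundUp.asUInt
}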
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
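An editorial aside on the file above (the note and module below are illustrative, not part of the quoted source): ShiftRegInit is a plain resettable delay line with nameable stages, so a typical use is adding a fixed pipeline delay to a control flag, as in the invented module below.
import chisel3._
import freechips.rocketchip.util.ShiftRegInit

// Delay a valid flag by three cycles; each flop resets to false and is
// suggest-named pipe_0 .. pipe_2 by the helper.
class DelayedValid extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  io.out := ShiftRegInit(io.in, n = 3, init = false.B, name = Some("pipe"))
}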
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_183( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_335 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
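For context (a hedged illustration added here, not another dataset row): the wrapper just generated, AsyncResetSynchronizerShiftReg_w1_d3_i0, is what the companion object in the quoted SynchronizerReg.scala elaborates when a 1-bit signal is synchronized three flops deep with reset value 0, roughly as in the invented module below.
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Bring a level signal from a foreign clock domain into this module's domain.
class LevelSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_level = Input(Bool())   // driven from another clock domain
    val sync_level  = Output(Bool())  // safe to sample in this domain
  })
  io.sync_level := AsyncResetSynchronizerShiftReg(io.async_level, 3, 0)
}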
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_466( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
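Behaviourally, the primitive above is just three async-reset flops in series; a minimal hand-written equivalent (an illustration for this note, not the rocket-chip source) is sketched below.
import chisel3._

// Three-stage, init-0 synchronizer chain under an asynchronous reset,
// mirroring the sync_2 -> sync_1 -> sync_0 registers in the Verilog above.
class PrimitiveSync3 extends Module with RequireAsyncReset {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  val sync_2 = RegInit(false.B)
  val sync_1 = RegInit(false.B)
  val sync_0 = RegInit(false.B)
  sync_2 := io.d
  sync_1 := sync_2
  sync_0 := sync_1
  io.q := sync_0
}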
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_232( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_424 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
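One more hedged aside before the next row: the SynchronizerReg.scala quoted above also provides ClockCrossingReg, a single enabled flop intended (per the file's own comment) for single-deep crossings rather than multi-stage synchronization. An invented usage sketch:
import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

// Capture an 8-bit configuration word on an enable pulse; doInit = true
// gives the flop a reset value of zero (RegEnable(io.d, 0.U, io.en)).
class CapturedConfig extends Module {
  val io = IO(new Bundle {
    val cfg_in  = Input(UInt(8.W))
    val capture = Input(Bool())
    val cfg_out = Output(UInt(8.W))
  })
  io.cfg_out := ClockCrossingReg(io.cfg_in, en = io.capture, doInit = true)
}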
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
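Before the next quoted file, a hedged note on how the TLBuffer factory above is normally used: each apply returns a TLNode that is spliced into a diplomatic graph with :=, inserting the requested buffering on every channel of that link. A minimal sketch under that assumption (node and class names are invented placeholders):
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.tilelink._

// Splice one default TLBuffer between an existing client node and manager node.
class BufferedLink(client: TLClientNode, manager: TLManagerNode)(implicit p: Parameters)
    extends LazyModule {
  manager := TLBuffer() := client
  lazy val module = new LazyModuleImp(this) { }
}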
File AtomicAutomata.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.util.leftOR
import scala.math.{min,max}
// Ensures that all downstream RW managers support Atomic operations.
// If !passthrough, intercept all Atomics. Otherwise, only intercept those unsupported downstream.
class TLAtomicAutomata(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true)(implicit p: Parameters) extends LazyModule
{
require (concurrency >= 1)
val node = TLAdapterNode(
managerFn = { case mp => mp.v1copy(managers = mp.managers.map { m =>
val ourSupport = TransferSizes(1, mp.beatBytes)
def widen(x: TransferSizes) = if (passthrough && x.min <= 2*mp.beatBytes) TransferSizes(1, max(mp.beatBytes, x.max)) else ourSupport
val canDoit = m.supportsPutFull.contains(ourSupport) && m.supportsGet.contains(ourSupport)
// Blow up if there are devices to which we cannot add Atomics, because their R|W are too inflexible
require (!m.supportsPutFull || !m.supportsGet || canDoit, s"${m.name} has $ourSupport, needed PutFull(${m.supportsPutFull}) or Get(${m.supportsGet})")
m.v1copy(
supportsArithmetic = if (!arithmetic || !canDoit) m.supportsArithmetic else widen(m.supportsArithmetic),
supportsLogical = if (!logical || !canDoit) m.supportsLogical else widen(m.supportsLogical),
mayDenyGet = m.mayDenyGet || m.mayDenyPut)
})})
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val managers = edgeOut.manager.managers
val beatBytes = edgeOut.manager.beatBytes
// To which managers are we adding atomic support?
val ourSupport = TransferSizes(1, beatBytes)
val managersNeedingHelp = managers.filter { m =>
m.supportsPutFull.contains(ourSupport) &&
m.supportsGet.contains(ourSupport) &&
((logical && !m.supportsLogical .contains(ourSupport)) ||
(arithmetic && !m.supportsArithmetic.contains(ourSupport)) ||
!passthrough) // we will do atomics for everyone we can
}
// Managers that need help with atomics must necessarily have this node as the root of a tree in the node graph.
// (But they must also ensure no sideband operations can get between the read and write.)
val violations = managersNeedingHelp.flatMap(_.findTreeViolation()).map { node => (node.name, node.inputs.map(_._1.name)) }
require(violations.isEmpty,
s"AtomicAutomata can only help nodes for which it is at the root of a diplomatic node tree," +
"but the following violations were found:\n" +
violations.map(v => s"(${v._1} has parents ${v._2})").mkString("\n"))
// We cannot add atomics to a non-FIFO manager
managersNeedingHelp foreach { m => require (m.fifoId.isDefined) }
// We need to preserve FIFO semantics across FIFO domains, not managers
// Suppose you have Put(42) Atomic(+1) both inflight; valid results: 42 or 43
// If we allow Put(42) Get() Put(+1) concurrent; valid results: 42 43 OR undef
// Making non-FIFO work requires waiting for all Acks to come back (=> use FIFOFixer)
val domainsNeedingHelp = managersNeedingHelp.map(_.fifoId.get).distinct
// Don't overprovision the CAM
val camSize = min(domainsNeedingHelp.size, concurrency)
// Compact the fifoIds to only those we care about
def camFifoId(m: TLSlaveParameters) = m.fifoId.map(id => max(0, domainsNeedingHelp.indexOf(id))).getOrElse(0)
// CAM entry state machine
val FREE = 0.U // unused waiting on Atomic from A
val GET = 3.U // Get sent down A waiting on AccessDataAck from D
val AMO = 2.U // AccessDataAck sent up D waiting for A availability
val ACK = 1.U // Put sent down A waiting for PutAck from D
val params = TLAtomicAutomata.CAMParams(out.a.bits.params, domainsNeedingHelp.size)
// Do we need to do anything at all?
if (camSize > 0) {
val initval = Wire(new TLAtomicAutomata.CAM_S(params))
initval.state := FREE
val cam_s = RegInit(VecInit.fill(camSize)(initval))
val cam_a = Reg(Vec(camSize, new TLAtomicAutomata.CAM_A(params)))
val cam_d = Reg(Vec(camSize, new TLAtomicAutomata.CAM_D(params)))
val cam_free = cam_s.map(_.state === FREE)
val cam_amo = cam_s.map(_.state === AMO)
val cam_abusy = cam_s.map(e => e.state === GET || e.state === AMO) // A is blocked
val cam_dmatch = cam_s.map(e => e.state =/= FREE) // D should inspect these entries
// Can the manager already handle this message?
val a_address = edgeIn.address(in.a.bits)
val a_size = edgeIn.size(in.a.bits)
val a_canLogical = passthrough.B && edgeOut.manager.supportsLogicalFast (a_address, a_size)
val a_canArithmetic = passthrough.B && edgeOut.manager.supportsArithmeticFast(a_address, a_size)
val a_isLogical = in.a.bits.opcode === TLMessages.LogicalData
val a_isArithmetic = in.a.bits.opcode === TLMessages.ArithmeticData
val a_isSupported = Mux(a_isLogical, a_canLogical, Mux(a_isArithmetic, a_canArithmetic, true.B))
// Must we do a Put?
val a_cam_any_put = cam_amo.reduce(_ || _)
val a_cam_por_put = cam_amo.scanLeft(false.B)(_||_).init
val a_cam_sel_put = (cam_amo zip a_cam_por_put) map { case (a, b) => a && !b }
val a_cam_a = PriorityMux(cam_amo, cam_a)
val a_cam_d = PriorityMux(cam_amo, cam_d)
val a_a = a_cam_a.bits.data
val a_d = a_cam_d.data
// Does the A request conflict with an inflight AMO?
val a_fifoId = edgeOut.manager.fastProperty(a_address, camFifoId _, (i:Int) => i.U)
val a_cam_busy = (cam_abusy zip cam_a.map(_.fifoId === a_fifoId)) map { case (a,b) => a&&b } reduce (_||_)
        // (Where) are we allocating in the CAM?
val a_cam_any_free = cam_free.reduce(_ || _)
val a_cam_por_free = cam_free.scanLeft(false.B)(_||_).init
val a_cam_sel_free = (cam_free zip a_cam_por_free) map { case (a,b) => a && !b }
// Logical AMO
val indexes = Seq.tabulate(beatBytes*8) { i => Cat(a_a(i,i), a_d(i,i)) }
val logic_out = Cat(indexes.map(x => a_cam_a.lut(x).asUInt).reverse)
// Arithmetic AMO
val unsigned = a_cam_a.bits.param(1)
val take_max = a_cam_a.bits.param(0)
val adder = a_cam_a.bits.param(2)
val mask = a_cam_a.bits.mask
val signSel = ~(~mask | (mask >> 1))
val signbits_a = Cat(Seq.tabulate(beatBytes) { i => a_a(8*i+7,8*i+7) } .reverse)
val signbits_d = Cat(Seq.tabulate(beatBytes) { i => a_d(8*i+7,8*i+7) } .reverse)
// Move the selected sign bit into the first byte position it will extend
val signbit_a = ((signbits_a & signSel) << 1)(beatBytes-1, 0)
val signbit_d = ((signbits_d & signSel) << 1)(beatBytes-1, 0)
val signext_a = FillInterleaved(8, leftOR(signbit_a))
val signext_d = FillInterleaved(8, leftOR(signbit_d))
// NOTE: sign-extension does not change the relative ordering in EITHER unsigned or signed arithmetic
val wide_mask = FillInterleaved(8, mask)
val a_a_ext = (a_a & wide_mask) | signext_a
val a_d_ext = (a_d & wide_mask) | signext_d
val a_d_inv = Mux(adder, a_d_ext, ~a_d_ext)
val adder_out = a_a_ext + a_d_inv
val h = 8*beatBytes-1 // now sign-extended; use biggest bit
val a_bigger_uneq = unsigned === a_a_ext(h) // result if high bits are unequal
val a_bigger = Mux(a_a_ext(h) === a_d_ext(h), !adder_out(h), a_bigger_uneq)
val pick_a = take_max === a_bigger
val arith_out = Mux(adder, adder_out, Mux(pick_a, a_a, a_d))
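// In words: each operand is masked to its active byte lanes and the MSB of the top active
// byte is replicated into the unused upper bytes, so one full-width compare works for any
// access size. For ADD the adder computes a + d; for MIN/MAX it computes a + ~d (= a - d - 1),
// whose sign bit decides which operand is larger when the two extended sign bits agree.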
// AMO result data
val amo_data =
if (!logical) arith_out else
if (!arithmetic) logic_out else
Mux(a_cam_a.bits.opcode(0), logic_out, arith_out)
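// Worked example of the 4-entry logical LUT: the index is Cat(a_bit, d_bit), so for
// OR (lut = 0xe = b1110) only index 0 (a=0, d=0) yields 0; AND (0x8) is set only for
// index 3 (a=1, d=1); XOR (0x6) is set for indices 1 and 2; SWAP (0xc) returns the
// A-channel operand bit regardless of d.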
// Potentially mutate the message from inner
val source_i = Wire(chiselTypeOf(in.a))
val a_allow = !a_cam_busy && (a_isSupported || a_cam_any_free)
in.a.ready := source_i.ready && a_allow
source_i.valid := in.a.valid && a_allow
source_i.bits := in.a.bits
when (!a_isSupported) { // minimal mux difference
source_i.bits.opcode := TLMessages.Get
source_i.bits.param := 0.U
}
// Potentially take the message from the CAM
val source_c = Wire(chiselTypeOf(in.a))
source_c.valid := a_cam_any_put
source_c.bits := edgeOut.Put(
fromSource = a_cam_a.bits.source,
toAddress = edgeIn.address(a_cam_a.bits),
lgSize = a_cam_a.bits.size,
data = amo_data,
corrupt = a_cam_a.bits.corrupt || a_cam_d.corrupt)._2
source_c.bits.user :<= a_cam_a.bits.user
source_c.bits.echo :<= a_cam_a.bits.echo
// Finishing an AMO from the CAM has highest priority
TLArbiter(TLArbiter.lowestIndexFirst)(out.a, (0.U, source_c), (edgeOut.numBeats1(in.a.bits), source_i))
// Capture the A state into the CAM
when (source_i.fire && !a_isSupported) {
(a_cam_sel_free zip cam_a) foreach { case (en, r) =>
when (en) {
r.fifoId := a_fifoId
r.bits := in.a.bits
r.lut := MuxLookup(in.a.bits.param(1, 0), 0.U(4.W))(Array(
TLAtomics.AND -> 0x8.U,
TLAtomics.OR -> 0xe.U,
TLAtomics.XOR -> 0x6.U,
TLAtomics.SWAP -> 0xc.U))
}
}
(a_cam_sel_free zip cam_s) foreach { case (en, r) =>
when (en) {
r.state := GET
}
}
}
// Advance the put state
when (source_c.fire) {
(a_cam_sel_put zip cam_s) foreach { case (en, r) =>
when (en) {
r.state := ACK
}
}
}
// We need to deal with a potential D response in the same cycle as the A request
val d_first = edgeOut.first(out.d)
val d_cam_sel_raw = cam_a.map(_.bits.source === out.d.bits.source)
val d_cam_sel_match = (d_cam_sel_raw zip cam_dmatch) map { case (a,b) => a&&b }
val d_cam_data = Mux1H(d_cam_sel_match, cam_d.map(_.data))
val d_cam_denied = Mux1H(d_cam_sel_match, cam_d.map(_.denied))
val d_cam_corrupt = Mux1H(d_cam_sel_match, cam_d.map(_.corrupt))
val d_cam_sel_bypass = if (edgeOut.manager.minLatency > 0) false.B else
out.d.bits.source === in.a.bits.source && in.a.valid && !a_isSupported
val d_cam_sel = (a_cam_sel_free zip d_cam_sel_match) map { case (a,d) => Mux(d_cam_sel_bypass, a, d) }
val d_cam_sel_any = d_cam_sel_bypass || d_cam_sel_match.reduce(_ || _)
val d_ackd = out.d.bits.opcode === TLMessages.AccessAckData
val d_ack = out.d.bits.opcode === TLMessages.AccessAck
when (out.d.fire && d_first) {
(d_cam_sel zip cam_d) foreach { case (en, r) =>
when (en && d_ackd) {
r.data := out.d.bits.data
r.denied := out.d.bits.denied
r.corrupt := out.d.bits.corrupt
}
}
(d_cam_sel zip cam_s) foreach { case (en, r) =>
when (en) {
// Note: it is important that this comes AFTER the := GET, so we can go FREE=>GET=>AMO in one cycle
r.state := Mux(d_ackd, AMO, FREE)
}
}
}
val d_drop = d_first && d_ackd && d_cam_sel_any
val d_replace = d_first && d_ack && d_cam_sel_match.reduce(_ || _)
in.d.valid := out.d.valid && !d_drop
out.d.ready := in.d.ready || d_drop
in.d.bits := out.d.bits
when (d_replace) { // minimal muxes
in.d.bits.opcode := TLMessages.AccessAckData
in.d.bits.data := d_cam_data
in.d.bits.corrupt := d_cam_corrupt || out.d.bits.denied
in.d.bits.denied := d_cam_denied || out.d.bits.denied
}
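// Summary of the D-channel plumbing above: the AccessAckData for the internally generated
// Get is captured into cam_d and dropped (d_drop), while the AccessAck for the follow-up
// Put is upgraded (d_replace) into the AccessAckData the inner client expects, merging any
// denied/corrupt indications from both halves of the Get+Put sequence.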
} else {
out.a.valid := in.a.valid
in.a.ready := out.a.ready
out.a.bits := in.a.bits
in.d.valid := out.d.valid
out.d.ready := in.d.ready
in.d.bits := out.d.bits
}
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
in.b.valid := out.b.valid
out.b.ready := in.b.ready
in.b.bits := out.b.bits
out.c.valid := in.c.valid
in.c.ready := out.c.ready
out.c.bits := in.c.bits
out.e.valid := in.e.valid
in.e.ready := out.e.ready
out.e.bits := in.e.bits
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLAtomicAutomata
{
def apply(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val atomics = LazyModule(new TLAtomicAutomata(logical, arithmetic, concurrency, passthrough) {
override lazy val desiredName = (Seq("TLAtomicAutomata") ++ nameSuffix).mkString("_")
})
atomics.node
}
case class CAMParams(a: TLBundleParameters, domainsNeedingHelp: Int)
class CAM_S(val params: CAMParams) extends Bundle {
val state = UInt(2.W)
}
class CAM_A(val params: CAMParams) extends Bundle {
val bits = new TLBundleA(params.a)
val fifoId = UInt(log2Up(params.domainsNeedingHelp).W)
val lut = UInt(4.W)
}
class CAM_D(val params: CAMParams) extends Bundle {
val data = UInt(params.a.dataBits.W)
val denied = Bool()
val corrupt = Bool()
}
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMAtomicAutomata(txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("AtomicAutomata"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
// Confirm that the AtomicAutomata combines read + write errors
import TLMessages._
val test = new RequestPattern({a: TLBundleA =>
val doesA = a.opcode === ArithmeticData || a.opcode === LogicalData
val doesR = a.opcode === Get || doesA
val doesW = a.opcode === PutFullData || a.opcode === PutPartialData || doesA
(doesR && RequestPattern.overlaps(Seq(AddressSet(0x08, ~0x08)))(a)) ||
(doesW && RequestPattern.overlaps(Seq(AddressSet(0x10, ~0x10)))(a))
})
(ram.node
:= TLErrorEvaluator(test)
:= TLFragmenter(4, 256)
:= TLDelayer(0.1)
:= TLAtomicAutomata()
:= TLDelayer(0.1)
:= TLErrorEvaluator(test, testOn=true, testOff=true)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMAtomicAutomataTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMAtomicAutomata(txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
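// A minimal wiring sketch (illustrative only; the class name and parameter values below are
// not part of the original file): TLAtomicAutomata is simply spliced into a TileLink edge,
// here between a fuzzer and a RAM, mirroring the unit test above without the error-injection
// stages. Unsupported Logical/Arithmetic requests are emulated as Get+Put pairs.
class TLAtomicAutomataExample(txns: Int)(implicit p: Parameters) extends LazyModule {
  val fuzz = LazyModule(new TLFuzzer(txns))
  val ram  = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
  ram.node := TLAtomicAutomata(concurrency = 2) := fuzz.node
  lazy val module = new LazyModuleImp(this) { }
}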
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
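// Note: every lazy child of a Domain is clocked by clockBundle via childClock/childReset above.
// The PeripheryBus_ccbus0 module emitted further below is elaborated inside such a domain,
// which is why its declaration carries @[ClockDomain.scala:14:9] annotations.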
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File PeripheryBus.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.subsystem
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.tilelink.{BuiltInZeroDeviceParams, BuiltInErrorDeviceParams, HasBuiltInDeviceParams, BuiltInDevices}
import freechips.rocketchip.diplomacy.BufferParams
import freechips.rocketchip.tilelink.{
RegionReplicator, ReplicatedRegion, HasTLBusParams, HasRegionReplicatorParams, TLBusWrapper,
TLBusWrapperInstantiationLike, TLFIFOFixer, TLNode, TLXbar, TLInwardNode, TLOutwardNode,
TLBuffer, TLWidthWidget, TLAtomicAutomata, TLEdge
}
import freechips.rocketchip.util.Location
case class BusAtomics(
arithmetic: Boolean = true,
buffer: BufferParams = BufferParams.default,
widenBytes: Option[Int] = None
)
case class PeripheryBusParams(
beatBytes: Int,
blockBytes: Int,
atomics: Option[BusAtomics] = Some(BusAtomics()),
dtsFrequency: Option[BigInt] = None,
zeroDevice: Option[BuiltInZeroDeviceParams] = None,
errorDevice: Option[BuiltInErrorDeviceParams] = None,
replication: Option[ReplicatedRegion] = None)
extends HasTLBusParams
with HasBuiltInDeviceParams
with HasRegionReplicatorParams
with TLBusWrapperInstantiationLike
{
def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): PeripheryBus = {
val pbus = LazyModule(new PeripheryBus(this, loc.name))
pbus.suggestName(loc.name)
context.tlBusWrapperLocationMap += (loc -> pbus)
pbus
}
}
class PeripheryBus(params: PeripheryBusParams, name: String)(implicit p: Parameters)
extends TLBusWrapper(params, name)
{
override lazy val desiredName = s"PeripheryBus_$name"
private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r)))
val prefixNode = replicator.map { r =>
r.prefix := addressPrefixNexusNode
addressPrefixNexusNode
}
private val fixer = LazyModule(new TLFIFOFixer(TLFIFOFixer.all))
private val node: TLNode = params.atomics.map { pa =>
val in_xbar = LazyModule(new TLXbar(nameSuffix = Some(s"${name}_in")))
val out_xbar = LazyModule(new TLXbar(nameSuffix = Some(s"${name}_out")))
val fixer_node = replicator.map(fixer.node :*= _.node).getOrElse(fixer.node)
(out_xbar.node
:*= fixer_node
:*= TLBuffer(pa.buffer)
:*= (pa.widenBytes.filter(_ > beatBytes).map { w =>
TLWidthWidget(w) :*= TLAtomicAutomata(arithmetic = pa.arithmetic, nameSuffix = Some(name))
} .getOrElse { TLAtomicAutomata(arithmetic = pa.arithmetic, nameSuffix = Some(name)) })
:*= in_xbar.node)
} .getOrElse { TLXbar() :*= fixer.node }
def inwardNode: TLInwardNode = node
def outwardNode: TLOutwardNode = node
def busView: TLEdge = fixer.node.edges.in.head
val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode)
}
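// Note on the chain built above: master traffic enters through in_xbar, passes through the
// TLAtomicAutomata stage (with a TLWidthWidget beside it when widenBytes is set), then a
// TLBuffer and the TLFIFOFixer, before fanning out to devices via out_xbar. A hypothetical
// configuration widening the internal datapath might look like (illustrative values only):
//   PeripheryBusParams(beatBytes = 4, blockBytes = 64,
//     atomics = Some(BusAtomics(widenBytes = Some(8))))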
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
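// In short: a ClockGroup takes a single inbound clock group and splits it into one plain
// clock/reset pair per member, while ClockGroupAggregator (below) takes a single inbound
// group and distributes its members across its outward group edges; both require exactly
// one inward edge.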
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
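// A minimal usage sketch (illustrative only; not part of the original file): the lazy
// `module` of a LazyModule is elaborated only after diplomacy has resolved the node graph,
// and a plain LazyModuleImp receives the implicit clock/reset of an ordinary chisel3 Module.
class ExampleLeaf(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    val io = IO(new Bundle { val out = Output(Bool()) })
    io.out := RegNext(true.B, false.B)
  }
}
// LazyRawModuleImp is used instead when the module must not pick up an implicit clock,
// as in the clock-domain wrappers earlier in this document.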
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to the protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is the hardware interface of this sink.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is the hardware interface of this source.
* It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward bindings and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved, in some way, to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
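// In other words, oPortMapping(k) is the half-open range [start, end) of outward edge indices
// contributed by the k-th outward binding: BIND_ONCE contributes one edge, BIND_QUERY contributes
// the neighbour's iStar, and BIND_STAR contributes this node's resolved oStar (BIND_FLEX picks
// one side or the other); iPortMapping is built symmetrically.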
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
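// For example, `busView` in PeripheryBus above peeks at `fixer.node.edges.in.head` to describe
// the bus without creating any hardware; `in`/`out` below additionally hand back the elaborated
// bundle wires and therefore may only be used inside LazyModuleImp code.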
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
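// Note that the binding type is mirrored when pushed onto the far side: a BIND_STAR on this
// node is recorded as a BIND_QUERY on `h` (and vice versa), which is what lets each node's
// resolveStar query its neighbour for the counts it needs (n.iStar / n.oStar).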
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module PeripheryBus_ccbus0( // @[ClockDomain.scala:14:9]
input auto_ccbus0_clock_groups_in_member_ccbus0_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_ccbus0_clock_groups_in_member_ccbus0_0_reset // @[LazyModuleImp.scala:107:25]
);
wire fixedClockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9]
wire fixedClockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9]
wire clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9]
wire clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9]
wire ccbus0_clock_groups_auto_out_member_ccbus0_0_reset; // @[ClockGroup.scala:53:9]
wire ccbus0_clock_groups_auto_out_member_ccbus0_0_clock; // @[ClockGroup.scala:53:9]
wire auto_ccbus0_clock_groups_in_member_ccbus0_0_clock_0 = auto_ccbus0_clock_groups_in_member_ccbus0_0_clock; // @[ClockDomain.scala:14:9]
wire auto_ccbus0_clock_groups_in_member_ccbus0_0_reset_0 = auto_ccbus0_clock_groups_in_member_ccbus0_0_reset; // @[ClockDomain.scala:14:9]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire ccbus0_clock_groups_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire ccbus0_clock_groups_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire ccbus0_clock_groups__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire clockGroup_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire clockGroup_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire clockGroup__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire fixedClockNode_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire fixedClockNode_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire fixedClockNode__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire ccbus0_clock_groups_auto_in_member_ccbus0_0_clock = auto_ccbus0_clock_groups_in_member_ccbus0_0_clock_0; // @[ClockGroup.scala:53:9]
wire ccbus0_clock_groups_auto_in_member_ccbus0_0_reset = auto_ccbus0_clock_groups_in_member_ccbus0_0_reset_0; // @[ClockGroup.scala:53:9]
wire clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
wire clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
wire ccbus0_clock_groups_nodeIn_member_ccbus0_0_clock = ccbus0_clock_groups_auto_in_member_ccbus0_0_clock; // @[ClockGroup.scala:53:9]
wire ccbus0_clock_groups_nodeOut_member_ccbus0_0_clock; // @[MixedNode.scala:542:17]
wire ccbus0_clock_groups_nodeIn_member_ccbus0_0_reset = ccbus0_clock_groups_auto_in_member_ccbus0_0_reset; // @[ClockGroup.scala:53:9]
wire ccbus0_clock_groups_nodeOut_member_ccbus0_0_reset; // @[MixedNode.scala:542:17]
wire clockGroup_auto_in_member_ccbus0_0_clock = ccbus0_clock_groups_auto_out_member_ccbus0_0_clock; // @[ClockGroup.scala:24:9, :53:9]
wire clockGroup_auto_in_member_ccbus0_0_reset = ccbus0_clock_groups_auto_out_member_ccbus0_0_reset; // @[ClockGroup.scala:24:9, :53:9]
assign ccbus0_clock_groups_auto_out_member_ccbus0_0_clock = ccbus0_clock_groups_nodeOut_member_ccbus0_0_clock; // @[ClockGroup.scala:53:9]
assign ccbus0_clock_groups_auto_out_member_ccbus0_0_reset = ccbus0_clock_groups_nodeOut_member_ccbus0_0_reset; // @[ClockGroup.scala:53:9]
assign ccbus0_clock_groups_nodeOut_member_ccbus0_0_clock = ccbus0_clock_groups_nodeIn_member_ccbus0_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign ccbus0_clock_groups_nodeOut_member_ccbus0_0_reset = ccbus0_clock_groups_nodeIn_member_ccbus0_0_reset; // @[MixedNode.scala:542:17, :551:17]
wire clockGroup_nodeIn_member_ccbus0_0_clock = clockGroup_auto_in_member_ccbus0_0_clock; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_clock; // @[MixedNode.scala:542:17]
wire clockGroup_nodeIn_member_ccbus0_0_reset = clockGroup_auto_in_member_ccbus0_0_reset; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_reset; // @[MixedNode.scala:542:17]
wire fixedClockNode_auto_anon_in_clock = clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9, :104:9]
wire fixedClockNode_auto_anon_in_reset = clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9, :104:9]
assign clockGroup_auto_out_clock = clockGroup_nodeOut_clock; // @[ClockGroup.scala:24:9]
assign clockGroup_auto_out_reset = clockGroup_nodeOut_reset; // @[ClockGroup.scala:24:9]
assign clockGroup_nodeOut_clock = clockGroup_nodeIn_member_ccbus0_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockGroup_nodeOut_reset = clockGroup_nodeIn_member_ccbus0_0_reset; // @[MixedNode.scala:542:17, :551:17]
wire fixedClockNode_anonIn_clock = fixedClockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9]
wire fixedClockNode_anonOut_clock; // @[MixedNode.scala:542:17]
wire fixedClockNode_anonIn_reset = fixedClockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9]
wire fixedClockNode_anonOut_reset; // @[MixedNode.scala:542:17]
assign clockSinkNodeIn_clock = fixedClockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9]
assign clockSinkNodeIn_reset = fixedClockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9]
assign fixedClockNode_auto_anon_out_clock = fixedClockNode_anonOut_clock; // @[ClockGroup.scala:104:9]
assign fixedClockNode_auto_anon_out_reset = fixedClockNode_anonOut_reset; // @[ClockGroup.scala:104:9]
assign fixedClockNode_anonOut_clock = fixedClockNode_anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign fixedClockNode_anonOut_reset = fixedClockNode_anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign childClock = clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
assign childReset = clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
TLXbar_ccbus0_in_i0_o0_a1d8s1k1z1u in_xbar ( // @[PeripheryBus.scala:56:29]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[PeripheryBus.scala:56:29]
TLXbar_ccbus0_out_i0_o0_a1d8s1k1z1u out_xbar ( // @[PeripheryBus.scala:57:30]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[PeripheryBus.scala:57:30]
TLBuffer_1 buffer ( // @[Buffer.scala:75:28]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Buffer.scala:75:28]
TLAtomicAutomata_ccbus0 atomics ( // @[AtomicAutomata.scala:289:29]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[AtomicAutomata.scala:289:29]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File AsyncResetReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
/** This black-boxes an Async Reset
* (or Set)
* Register.
*
* Because Chisel doesn't support
* parameterized black boxes,
* we unfortunately have to
* instantiate a number of these.
*
* We also have to hard-code the set/
* reset behavior.
*
* Do not confuse an asynchronous
* reset signal with an asynchronously
* reset reg. You should still
* properly synchronize your reset
* deassertion.
*
* @param d Data input
* @param q Data Output
* @param clk Clock Input
* @param rst Reset Input
* @param en Write Enable Input
*
*/
class AsyncResetReg(resetValue: Int = 0) extends RawModule {
val io = IO(new Bundle {
val d = Input(Bool())
val q = Output(Bool())
val en = Input(Bool())
val clk = Input(Clock())
val rst = Input(Reset())
})
val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
class SimpleRegIO(val w: Int) extends Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
}
class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module {
override def desiredName = s"AsyncResetRegVec_w${w}_i${init}"
val io = IO(new SimpleRegIO(w))
val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
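// The withReset(reset.asAsyncReset) idiom above is what makes this register asynchronously
// reset: in the emitted Verilog the reset appears in the sensitivity list
// (always @(posedge clock or posedge reset)) rather than being sampled synchronously.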
object AsyncResetReg {
// Create Single Registers
def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = {
val reg = Module(new AsyncResetReg(if (init) 1 else 0))
reg.io.d := d
reg.io.clk := clk
reg.io.rst := rst
reg.io.en := true.B
name.foreach(reg.suggestName(_))
reg.io.q
}
def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None)
def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name))
// Create Vectors of Registers
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = {
val w = updateData.getWidth max resetData.bitLength
val reg = Module(new AsyncResetRegVec(w, resetData))
name.foreach(reg.suggestName(_))
reg.io.d := updateData
reg.io.en := enable
reg.io.q
}
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData,
resetData, enable, Some(name))
def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B)
def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name))
def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable)
def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name))
def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B)
def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name))
}
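// Usage sketch (editor's addition, not part of the original file; the signal
// names `rawIrq`, `coreClock`, `coreReset`, `wdata` and `wen` are hypothetical):
//
//   // Single-bit form: returns the asynchronously reset, registered Bool.
//   val irqSync = AsyncResetReg(rawIrq, coreClock, coreReset, init = false, name = Some("irqSync"))
//
//   // Vector form: an 8-bit register that asynchronously resets to 0x0F.
//   val ctrlQ = AsyncResetReg(wdata, resetData = BigInt(0x0F), enable = wen, name = "ctrlReg")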
| module AsyncResetRegVec_w1_i1( // @[AsyncResetReg.scala:56:7]
input clock, // @[AsyncResetReg.scala:56:7]
input reset, // @[AsyncResetReg.scala:56:7]
input io_d, // @[AsyncResetReg.scala:59:14]
output io_q, // @[AsyncResetReg.scala:59:14]
input io_en // @[AsyncResetReg.scala:59:14]
);
reg reg_0; // @[AsyncResetReg.scala:61:50]
always @(posedge clock or posedge reset) begin // @[AsyncResetReg.scala:56:7]
if (reset) // @[AsyncResetReg.scala:56:7]
reg_0 <= 1'h1; // @[AsyncResetReg.scala:56:7, :61:50]
else if (io_en) // @[AsyncResetReg.scala:59:14]
reg_0 <= io_d; // @[AsyncResetReg.scala:61:50]
  end // always @(posedge, posedge)
  assign io_q = reg_0; // @[AsyncResetReg.scala:61:50]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Protocol.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.router.{RouterCtrlBundle}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.subsystem._
import scala.collection.immutable.{ListMap}
// BEGIN: NodeMapping
case class DiplomaticNetworkNodeMapping(
inNodeMapping: ListMap[String, Int] = ListMap[String, Int](),
outNodeMapping: ListMap[String, Int] = ListMap[String, Int]()
) {
// END: NodeMapping
def genUniqueName(all: Seq[Seq[String]]) = {
all.zipWithIndex.map { case (strs, i) =>
val matches = all.take(i).map(_.mkString).count(_ == strs.mkString)
strs.map(s => s"${s}[${matches}]").mkString(",") + "|"
}
}
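  // Editor's note (not in the original source): genUniqueName disambiguates
  // otherwise-identical edge names by appending "[n]" (the count of earlier
  // duplicates of the same joined name) to each component and terminating the
  // comma-joined result with "|"; getNode below then matches mapping keys as
  // substrings of these generated names.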
def getNode(l: String, nodeMapping: ListMap[String, Int]): Option[Int] = {
val keys = nodeMapping.keys.toSeq
val matches = keys.map(k => l.contains(k))
if (matches.filter(i => i).size == 1) {
val index = matches.indexWhere(i => i)
Some(nodeMapping.values.toSeq(index))
} else {
None
}
}
def getNodes(ls: Seq[String], mapping: ListMap[String, Int]): Seq[Option[Int]] = {
ls.map(l => getNode(l, mapping))
}
def getNodesIn(ls: Seq[String]): Seq[Option[Int]] = getNodes(ls, inNodeMapping)
def getNodesOut(ls: Seq[String]): Seq[Option[Int]] = getNodes(ls, outNodeMapping)
}
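// Construction sketch (editor's addition, not part of the original file; the
// keys and node indices are hypothetical). getNode only returns Some(node)
// when exactly one key matches the edge name:
//
//   val mapping = DiplomaticNetworkNodeMapping(
//     inNodeMapping  = ListMap("Core 0" -> 0, "Core 1" -> 1),
//     outNodeMapping = ListMap("pbus"   -> 2, "mbus"   -> 3)
//   )
//   // mapping.getNode("Core 0[0]|", mapping.inNodeMapping) == Some(0)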
// BEGIN: ProtocolParams
trait ProtocolParams {
val minPayloadWidth: Int
val ingressNodes: Seq[Int]
val egressNodes: Seq[Int]
val nVirtualNetworks: Int
val vNetBlocking: (Int, Int) => Boolean
val flows: Seq[FlowParams]
def genIO()(implicit p: Parameters): Data
def interface(
terminals: NoCTerminalIO,
ingressOffset: Int,
egressOffset: Int,
protocol: Data)(implicit p: Parameters)
}
// END: ProtocolParams
// BEGIN: ProtocolNoC
case class ProtocolNoCParams(
nocParams: NoCParams,
protocolParams: Seq[ProtocolParams],
widthDivision: Int = 1,
inlineNoC: Boolean = false
)
class ProtocolNoC(params: ProtocolNoCParams)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val ctrl = if (params.nocParams.hasCtrl) Vec(params.nocParams.topology.nNodes, new RouterCtrlBundle) else Nil
val protocol = MixedVec(params.protocolParams.map { u => u.genIO() })
})
// END: ProtocolNoC
if (params.inlineNoC) chisel3.experimental.annotate(
new chisel3.experimental.ChiselAnnotation {
def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(toNamed)
}
)
val protocolParams = params.protocolParams
val minPayloadWidth = protocolParams.map(_.minPayloadWidth).max
val nocPayloadWidth = math.ceil(minPayloadWidth.toDouble / params.widthDivision).toInt
val terminalPayloadWidth = nocPayloadWidth * params.widthDivision
val ingressOffsets = protocolParams.map(_.ingressNodes.size).scanLeft(0)(_+_)
val egressOffsets = protocolParams.map(_.egressNodes.size).scanLeft(0)(_+_)
val vNetOffsets = protocolParams.map(_.nVirtualNetworks).scanLeft(0)(_+_)
val nocParams = params.nocParams.copy(
ingresses = protocolParams.map(_.ingressNodes).flatten.map(i =>
UserIngressParams(i, payloadBits=terminalPayloadWidth)),
egresses = protocolParams.map(_.egressNodes).flatten.map(i =>
UserEgressParams(i, payloadBits=terminalPayloadWidth)),
routerParams = (i) => params.nocParams.routerParams(i).copy(payloadBits=nocPayloadWidth),
vNetBlocking = (blocker, blockee) => {
def protocolId(i: Int) = vNetOffsets.drop(1).indexWhere(_ > i)
if (protocolId(blocker) == protocolId(blockee)) {
protocolParams(protocolId(blocker)).vNetBlocking(
blocker - vNetOffsets(protocolId(blocker)),
blockee - vNetOffsets(protocolId(blockee))
)
} else {
true
}
},
flows = protocolParams.zipWithIndex.map { case (u,i) =>
u.flows.map(f => f.copy(
ingressId = f.ingressId + ingressOffsets(i),
egressId = f.egressId + egressOffsets(i),
vNetId = f.vNetId + vNetOffsets(i)
))
}.flatten
)
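  // Editor's note (not in the original source): widthDivision trades flit
  // width for serialization. Routers carry nocPayloadWidth =
  // ceil(minPayloadWidth / widthDivision) bits per flit, while the terminal
  // ingress/egress interfaces are sized to nocPayloadWidth * widthDivision,
  // so a widthDivision of N narrows the internal network datapath roughly N-fold.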
val noc = Module(LazyModule(new NoC(nocParams)).module)
noc.io.router_clocks.foreach(_.clock := clock)
noc.io.router_clocks.foreach(_.reset := reset)
(noc.io.router_ctrl zip io.ctrl).foreach { case (l, r) => l <> r }
(protocolParams zip io.protocol).zipWithIndex.foreach { case ((u, io), x) =>
val terminals = Wire(new NoCTerminalIO(
noc.io.ingressParams.drop(ingressOffsets(x)).take(u.ingressNodes.size),
noc.io.egressParams .drop(egressOffsets(x)) .take(u.egressNodes.size)
))
(terminals.ingress zip noc.io.ingress.drop(ingressOffsets(x))).map { case (l,r) => l <> r }
(terminals.egress zip noc.io.egress.drop (egressOffsets(x))).map { case (l,r) => l <> r }
u.interface(
terminals,
ingressOffsets(x),
egressOffsets(x),
io)
}
}
File Tilelink.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
trait TLFieldHelper {
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
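// Editor's note (not in the original source): minTLPayloadWidth sizes the NoC
// payload to the wider of a channel's per-beat "body" fields (mask/data/corrupt)
// and its beat-invariant "const" fields (opcode, param, size, source, address,
// ...), then takes the max across the channels of interest, so the payload is
// wide enough to hold either field group for any channel in the bundle.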
class TLMasterToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 2))
a.io.protocol <> io.tilelink.a
io.tilelink.b <> b.io.protocol
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
e.io.protocol <> io.tilelink.e
io.flits.a <> a.io.flit
b.io.flit <> io.flits.b
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
io.flits.e <> e.io.flit
}
class TLMasterACDToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val a = Decoupled(new IngressFlit(flitWidth))
val c = Decoupled(new IngressFlit(flitWidth))
val d = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val a = Module(new TLAToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0, sourceStart))
val c = Module(new TLCToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 1, sourceStart))
val d = Module(new TLDFromNoC(edgeIn, wideBundle, sourceSize))
a.io.protocol <> io.tilelink.a
c.io.protocol <> io.tilelink.c
io.tilelink.d <> d.io.protocol
io.flits.a <> a.io.flit
io.flits.c <> c.io.flit
d.io.flit <> io.flits.d
}
class TLMasterBEToNoC(
edgeIn: TLEdge, edgesOut: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
slaveToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = Flipped(new TLBundle(wideBundle))
val flits = new Bundle {
val b = Flipped(Decoupled(new EgressFlit(flitWidth)))
val e = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val b = Module(new TLBFromNoC(edgeIn, wideBundle, sourceSize))
val e = Module(new TLEToNoC(edgeIn, edgesOut, wideBundle, (i) => slaveToEgressOffset(i) + 0))
io.tilelink.b <> b.io.protocol
e.io.protocol <> io.tilelink.e
b.io.flit <> io.flits.b
io.flits.e <> e.io.flit
}
class TLSlaveToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val b = Decoupled(new IngressFlit(flitWidth))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 1, sourceStart))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
io.tilelink.a <> a.io.protocol
b.io.protocol <> io.tilelink.b
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
io.tilelink.e <> e.io.protocol
a.io.flit <> io.flits.a
io.flits.b <> b.io.flit
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
e.io.flit <> io.flits.e
}
class TLSlaveACDToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val a = Flipped(Decoupled(new EgressFlit(flitWidth)))
val c = Flipped(Decoupled(new EgressFlit(flitWidth)))
val d = Decoupled(new IngressFlit(flitWidth))
}
})
io.tilelink := DontCare
val a = Module(new TLAFromNoC(edgeOut, wideBundle))
val c = Module(new TLCFromNoC(edgeOut, wideBundle))
val d = Module(new TLDToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0, sourceStart))
io.tilelink.a <> a.io.protocol
io.tilelink.c <> c.io.protocol
d.io.protocol <> io.tilelink.d
a.io.flit <> io.flits.a
c.io.flit <> io.flits.c
io.flits.d <> d.io.flit
}
class TLSlaveBEToNoC(
edgeOut: TLEdge, edgesIn: Seq[TLEdge],
sourceStart: Int, sourceSize: Int,
wideBundle: TLBundleParameters,
masterToEgressOffset: Int => Int,
flitWidth: Int
)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val tilelink = new TLBundle(wideBundle)
val flits = new Bundle {
val b = Decoupled(new IngressFlit(flitWidth))
val e = Flipped(Decoupled(new EgressFlit(flitWidth)))
}
})
io.tilelink := DontCare
val b = Module(new TLBToNoC(edgeOut, edgesIn, wideBundle, (i) => masterToEgressOffset(i) + 0))
val e = Module(new TLEFromNoC(edgeOut, wideBundle, sourceSize))
b.io.protocol <> io.tilelink.b
io.tilelink.e <> e.io.protocol
io.flits.b <> b.io.flit
e.io.flit <> io.flits.e
}
class TileLinkInterconnectInterface(edgesIn: Seq[TLEdge], edgesOut: Seq[TLEdge])(implicit val p: Parameters) extends Bundle {
val in = MixedVec(edgesIn.map { e => Flipped(new TLBundle(e.bundle)) })
val out = MixedVec(edgesOut.map { e => new TLBundle(e.bundle) })
}
trait TileLinkProtocolParams extends ProtocolParams with TLFieldHelper {
def edgesIn: Seq[TLEdge]
def edgesOut: Seq[TLEdge]
def edgeInNodes: Seq[Int]
def edgeOutNodes: Seq[Int]
require(edgesIn.size == edgeInNodes.size && edgesOut.size == edgeOutNodes.size)
def wideBundle = TLBundleParameters.union(edgesIn.map(_.bundle) ++ edgesOut.map(_.bundle))
def genBundle = new TLBundle(wideBundle)
def inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
def outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
val vNetBlocking = (blocker: Int, blockee: Int) => blocker < blockee
def genIO()(implicit p: Parameters): Data = new TileLinkInterconnectInterface(edgesIn, edgesOut)
}
object TLConnect {
def apply[T <: TLBundleBase](l: DecoupledIO[T], r: DecoupledIO[T]) = {
l.valid := r.valid
r.ready := l.ready
l.bits.squeezeAll.waiveAll :<>= r.bits.squeezeAll.waiveAll
}
}
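// Editor's note (not in the original source): TLConnect is used below to
// bridge channels whose bundle parameters differ, since the NoC-facing side
// uses the union `wideBundle`. squeezeAll/waiveAll relax the usual strict
// connection checks so that width mismatches and optional fields present on
// only one side do not cause elaboration errors.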
// BEGIN: TileLinkProtocolParams
case class TileLinkABCDEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]
) extends TileLinkProtocolParams {
// END: TileLinkProtocolParams
val minPayloadWidth = minTLPayloadWidth(new TLBundle(wideBundle))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(3) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (3) {u})).flatten
val nVirtualNetworks = 5
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 3 + 0 , oi * 3 + 0 + edgesIn.size * 2, 4)) else None) ++ // A
(if (probe ) Some(FlowParams(oi * 2 + 0 + edgesIn.size * 3, ii * 2 + 0 , 3)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 3 + 1 , oi * 3 + 1 + edgesIn.size * 2, 2)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 2 + 1 + edgesIn.size * 3, ii * 2 + 1 , 1)) else None) ++ // D
(if (release ) Some(FlowParams(ii * 3 + 2 , oi * 3 + 2 + edgesIn.size * 2, 0)) else None)) // E
}}.flatten.flatten
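  // Editor's note (not in the original source): the flow indices above follow
  // the layout defined by ingressNodes/egressNodes: each master edge i
  // contributes ingress points 3i+0/1/2 (A, C, E) and egress points 2i+0/1
  // (B, D); each slave edge o contributes ingress points edgesIn.size*3 + 2o+0/1
  // (B, D) and egress points edgesIn.size*2 + 3o+0/1/2 (A, C, E). Virtual
  // networks 4 down to 0 carry A, B, C, D and E respectively.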
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master = Module(new TLMasterToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 3 + edgesIn.size * 2 + egressOffset,
minPayloadWidth
))
nif_master.io.tilelink := DontCare
nif_master.io.tilelink.a.valid := false.B
nif_master.io.tilelink.c.valid := false.B
nif_master.io.tilelink.e.valid := false.B
TLConnect(nif_master.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master.io.tilelink.b)
TLConnect(nif_master.io.tilelink.c, protocol.in(i).c)
TLConnect(nif_master.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 3 + 0).flit <> nif_master.io.flits.a
ingresses(i * 3 + 1).flit <> nif_master.io.flits.c
ingresses(i * 3 + 2).flit <> nif_master.io.flits.e
nif_master.io.flits.b <> egresses(i * 2 + 0).flit
nif_master.io.flits.d <> egresses(i * 2 + 1).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave = Module(new TLSlaveToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 2 + egressOffset,
minPayloadWidth
))
nif_slave.io.tilelink := DontCare
nif_slave.io.tilelink.b.valid := false.B
nif_slave.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave.io.tilelink.a)
TLConnect(nif_slave.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(nif_slave.io.tilelink.b, protocol.out(i).b)
TLConnect(protocol.out(i).c, nif_slave.io.tilelink.c)
TLConnect(protocol.out(i).e, nif_slave.io.tilelink.e)
}
ingresses(i * 2 + 0 + edgesIn.size * 3).flit <> nif_slave.io.flits.b
ingresses(i * 2 + 1 + edgesIn.size * 3).flit <> nif_slave.io.flits.d
nif_slave.io.flits.a <> egresses(i * 3 + 0 + edgesIn.size * 2).flit
nif_slave.io.flits.c <> egresses(i * 3 + 1 + edgesIn.size * 2).flit
nif_slave.io.flits.e <> egresses(i * 3 + 2 + edgesIn.size * 2).flit
}
} }
}
}
case class TileLinkACDProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.a, genBundle.c, genBundle.d).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(2) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (2) {u})).flatten
val nVirtualNetworks = 3
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val reachable = edgeIn.client.clients.exists { c => edgeOut.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (reachable) Some(FlowParams(ii * 2 + 0 , oi * 2 + 0 + edgesIn.size * 1, 2)) else None) ++ // A
(if (release ) Some(FlowParams(ii * 2 + 1 , oi * 2 + 1 + edgesIn.size * 1, 1)) else None) ++ // C
(if (reachable) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 2, ii * 1 + 0 , 0)) else None)) // D
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_acd = Module(new TLMasterACDToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 2 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_acd.io.tilelink := DontCare
nif_master_acd.io.tilelink.a.valid := false.B
nif_master_acd.io.tilelink.c.valid := false.B
nif_master_acd.io.tilelink.e.valid := false.B
TLConnect(nif_master_acd.io.tilelink.a, protocol.in(i).a)
TLConnect(protocol.in(i).d, nif_master_acd.io.tilelink.d)
if (protocol.in(i).params.hasBCE) {
TLConnect(nif_master_acd.io.tilelink.c, protocol.in(i).c)
}
ingresses(i * 2 + 0).flit <> nif_master_acd.io.flits.a
ingresses(i * 2 + 1).flit <> nif_master_acd.io.flits.c
nif_master_acd.io.flits.d <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_acd = Module(new TLSlaveACDToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_acd.io.tilelink := DontCare
nif_slave_acd.io.tilelink.b.valid := false.B
nif_slave_acd.io.tilelink.d.valid := false.B
TLConnect(protocol.out(i).a, nif_slave_acd.io.tilelink.a)
TLConnect(nif_slave_acd.io.tilelink.d, protocol.out(i).d)
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).c, nif_slave_acd.io.tilelink.c)
}
ingresses(i * 1 + 0 + edgesIn.size * 2).flit <> nif_slave_acd.io.flits.d
nif_slave_acd.io.flits.a <> egresses(i * 2 + 0 + edgesIn.size * 1).flit
nif_slave_acd.io.flits.c <> egresses(i * 2 + 1 + edgesIn.size * 1).flit
}
}}
}
}
case class TileLinkBEProtocolParams(
edgesIn: Seq[TLEdge],
edgesOut: Seq[TLEdge],
edgeInNodes: Seq[Int],
edgeOutNodes: Seq[Int]) extends TileLinkProtocolParams {
val minPayloadWidth = minTLPayloadWidth(Seq(genBundle.b, genBundle.e).map(_.bits))
val ingressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val egressNodes = (edgeInNodes.map(u => Seq.fill(1) (u)) ++ edgeOutNodes.map(u => Seq.fill (1) {u})).flatten
val nVirtualNetworks = 2
val flows = edgesIn.zipWithIndex.map { case (edgeIn, ii) => edgesOut.zipWithIndex.map { case (edgeOut, oi) =>
val probe = edgeIn.client.anySupportProbe && edgeOut.manager.managers.exists(_.regionType >= RegionType.TRACKED)
val release = edgeIn.client.anySupportProbe && edgeOut.manager.anySupportAcquireB
( (if (probe ) Some(FlowParams(oi * 1 + 0 + edgesIn.size * 1, ii * 1 + 0 , 1)) else None) ++ // B
(if (release ) Some(FlowParams(ii * 1 + 0 , oi * 1 + 0 + edgesIn.size * 1, 0)) else None)) // E
}}.flatten.flatten
def interface(terminals: NoCTerminalIO,
ingressOffset: Int, egressOffset: Int, protocol: Data)(implicit p: Parameters) = {
val ingresses = terminals.ingress
val egresses = terminals.egress
protocol match { case protocol: TileLinkInterconnectInterface => {
protocol := DontCare
edgesIn.zipWithIndex.map { case (e,i) =>
val nif_master_be = Module(new TLMasterBEToNoC(
e, edgesOut, inputIdRanges(i).start, inputIdRanges(i).size,
wideBundle,
(s) => s * 1 + edgesIn.size * 1 + egressOffset,
minPayloadWidth
))
nif_master_be.io.tilelink := DontCare
nif_master_be.io.tilelink.a.valid := false.B
nif_master_be.io.tilelink.c.valid := false.B
nif_master_be.io.tilelink.e.valid := false.B
if (protocol.in(i).params.hasBCE) {
TLConnect(protocol.in(i).b, nif_master_be.io.tilelink.b)
TLConnect(nif_master_be.io.tilelink.e, protocol.in(i).e)
}
ingresses(i * 1 + 0).flit <> nif_master_be.io.flits.e
nif_master_be.io.flits.b <> egresses(i * 1 + 0).flit
}
edgesOut.zipWithIndex.map { case (e,i) =>
val nif_slave_be = Module(new TLSlaveBEToNoC(
e, edgesIn, outputIdRanges(i).start, outputIdRanges(i).size,
wideBundle,
(s) => s * 1 + egressOffset,
minPayloadWidth
))
nif_slave_be.io.tilelink := DontCare
nif_slave_be.io.tilelink.b.valid := false.B
nif_slave_be.io.tilelink.d.valid := false.B
if (protocol.out(i).params.hasBCE) {
TLConnect(protocol.out(i).e, nif_slave_be.io.tilelink.e)
TLConnect(nif_slave_be.io.tilelink.b, protocol.out(i).b)
}
ingresses(i * 1 + 0 + edgesIn.size * 1).flit <> nif_slave_be.io.flits.b
nif_slave_be.io.flits.e <> egresses(i * 1 + 0 + edgesIn.size * 1).flit
}
}}
}
}
abstract class TLNoCLike(implicit p: Parameters) extends LazyModule {
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"TLNoC (data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
        // TileLink NoC does not preserve FIFO-ness; masters connecting to this NoC should instantiate FIFOFixers
port.managers map { manager => manager.v1copy(fifoId = None) }
}
)
}
)
}
abstract class TLNoCModuleImp(outer: LazyModule) extends LazyModuleImp(outer) {
val edgesIn: Seq[TLEdge]
val edgesOut: Seq[TLEdge]
val nodeMapping: DiplomaticNetworkNodeMapping
val nocName: String
lazy val inNames = nodeMapping.genUniqueName(edgesIn.map(_.master.masters.map(_.name)))
lazy val outNames = nodeMapping.genUniqueName(edgesOut.map(_.slave.slaves.map(_.name)))
lazy val edgeInNodes = nodeMapping.getNodesIn(inNames)
lazy val edgeOutNodes = nodeMapping.getNodesOut(outNames)
def printNodeMappings() {
println(s"Constellation: TLNoC $nocName inwards mapping:")
for ((n, i) <- inNames zip edgeInNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
println(s"Constellation: TLNoC $nocName outwards mapping:")
for ((n, i) <- outNames zip edgeOutNodes) {
val node = i.map(_.toString).getOrElse("X")
println(s" $node <- $n")
}
}
}
trait TLNoCParams
// Instantiates a private TLNoC. Replaces the TLXbar
// BEGIN: TLNoCParams
case class SimpleTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
nocParams: NoCParams = NoCParams(),
) extends TLNoCParams
class TLNoC(params: SimpleTLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
// END: TLNoCParams
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.nocParams.copy(hasCtrl = false, nocName=name, inlineNoC = inlineNoC),
Seq(protocolParams),
inlineNoC = inlineNoC
)))
noc.io.protocol(0) match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
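// Usage sketch (editor's addition, not part of the original file; the mapping
// keys, node indices and `myTopologyParams` are hypothetical):
//
//   val sbusNoC = LazyModule(new TLNoC(SimpleTLNoCParams(
//     DiplomaticNetworkNodeMapping(
//       inNodeMapping  = ListMap("Core" -> 0, "serial_tl" -> 1),
//       outNodeMapping = ListMap("pbus" -> 2, "system[0]" -> 3)),
//     nocParams = myTopologyParams   // some NoCParams describing topology/routing
//   ), name = "sbus"))
//   // sbusNoC.node is then wired in place of a TLXbar node:
//   //   slaveNode := sbusNoC.node := masterNode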
case class SplitACDxBETLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping,
acdNoCParams: NoCParams = NoCParams(),
beNoCParams: NoCParams = NoCParams(),
beDivision: Int = 2
) extends TLNoCParams
class TLSplitACDxBENoC(params: SplitACDxBETLNoCParams, name: String = "test", inlineNoC: Boolean = false)(implicit p: Parameters) extends TLNoCLike {
override def shouldBeInlined = inlineNoC
lazy val module = new TLNoCModuleImp(this) {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
printNodeMappings()
val acdProtocolParams = TileLinkACDProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val beProtocolParams = TileLinkBEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
val acd_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.acdNoCParams.copy(hasCtrl = false, nocName=s"${name}_acd", inlineNoC = inlineNoC),
Seq(acdProtocolParams),
inlineNoC = inlineNoC
)))
val be_noc = Module(new ProtocolNoC(ProtocolNoCParams(
params.beNoCParams.copy(hasCtrl = false, nocName=s"${name}_be", inlineNoC = inlineNoC),
Seq(beProtocolParams),
widthDivision = params.beDivision,
inlineNoC = inlineNoC
)))
acd_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.a <> r.a
l.c <> r.c
l.d <> r.d
}
}}
be_noc.io.protocol(0) match { case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) =>
l := DontCare
l.b <> r.b
l.e <> r.e
}
(io_out zip protocol.out).foreach { case (l,r) =>
r := DontCare
l.b <> r.b
l.e <> r.e
}
}}
}
}
case class GlobalTLNoCParams(
nodeMappings: DiplomaticNetworkNodeMapping
) extends TLNoCParams
// Maps this interconnect onto a global NoC
class TLGlobalNoC(params: GlobalTLNoCParams, name: String = "test")(implicit p: Parameters) extends TLNoCLike {
lazy val module = new TLNoCModuleImp(this) with CanAttachToGlobalNoC {
val (io_in, edgesIn) = node.in.unzip
val (io_out, edgesOut) = node.out.unzip
val nodeMapping = params.nodeMappings
val nocName = name
val protocolParams = TileLinkABCDEProtocolParams(
edgesIn = edgesIn,
edgesOut = edgesOut,
edgeInNodes = edgeInNodes.flatten,
edgeOutNodes = edgeOutNodes.flatten
)
printNodeMappings()
val io_global = IO(Flipped(protocolParams.genIO()))
io_global match {
case protocol: TileLinkInterconnectInterface => {
(protocol.in zip io_in).foreach { case (l,r) => l <> r }
(io_out zip protocol.out).foreach { case (l,r) => l <> r }
}
}
}
}
| module ProtocolNoC( // @[Protocol.scala:70:7]
input clock, // @[Protocol.scala:70:7]
input reset, // @[Protocol.scala:70:7]
output io_protocol_0_in_8_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_8_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_8_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_8_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_8_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_8_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_8_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_8_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_8_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_8_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_8_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_8_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_8_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_8_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_8_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_8_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_8_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_8_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_8_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_8_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_8_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_8_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_8_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_8_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_8_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_8_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_8_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_8_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_8_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_8_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_7_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_7_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_7_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_7_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_7_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_7_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_7_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_7_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_7_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_7_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_7_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_7_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_7_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_7_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_7_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_7_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_7_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_7_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_7_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_7_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_7_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_7_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_7_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_7_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_7_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_7_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_7_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_7_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_7_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_6_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_6_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_6_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_6_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_6_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_6_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_6_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_6_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_6_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_6_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_6_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_6_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_6_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_6_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_6_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_6_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_6_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_6_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_6_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_6_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_6_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_6_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_6_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_6_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_6_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_6_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_6_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_6_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_6_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_5_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_5_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_5_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_5_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_5_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_5_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_5_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_5_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_5_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_5_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_5_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_5_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_5_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_5_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_5_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_5_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_5_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_5_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_5_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_5_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_5_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_5_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_5_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_5_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_5_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_5_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_5_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_5_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_5_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_4_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_4_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_4_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_4_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_4_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_4_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_4_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_4_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_4_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_4_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_4_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_4_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_4_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_4_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_4_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_4_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_4_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_4_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_4_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_4_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_4_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_4_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_4_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_4_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_4_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_4_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_4_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_4_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_4_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_3_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_3_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_3_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_3_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_3_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_3_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_3_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_3_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_3_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_3_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_3_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_3_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_3_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_3_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_3_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_3_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_3_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_3_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_3_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_3_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_3_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_3_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_3_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_3_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_3_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_3_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_3_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_3_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_3_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_2_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_2_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_2_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_2_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_2_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_2_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_2_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_2_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_2_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_2_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_2_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_2_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_2_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_2_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_2_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_2_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_2_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_2_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_2_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_2_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_2_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_2_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_2_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_2_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_2_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_2_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_2_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_2_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_2_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_1_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_1_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_1_a_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_1_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_1_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_1_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_1_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_b_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_b_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_1_b_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_1_b_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_1_b_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_1_b_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_in_1_b_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_in_1_b_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_1_b_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_b_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_c_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_c_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_1_c_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_1_c_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_1_c_bits_size, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_in_1_c_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_1_c_bits_address, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_1_c_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_c_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_1_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_1_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_1_d_bits_size, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_1_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_1_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_1_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_in_1_e_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_1_e_valid, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_1_e_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_0_a_ready, // @[Protocol.scala:71:14]
input io_protocol_0_in_0_a_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_0_a_bits_opcode, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_in_0_a_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_in_0_a_bits_size, // @[Protocol.scala:71:14]
input [4:0] io_protocol_0_in_0_a_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_in_0_a_bits_address, // @[Protocol.scala:71:14]
input [7:0] io_protocol_0_in_0_a_bits_mask, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_in_0_a_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_in_0_a_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_in_0_d_ready, // @[Protocol.scala:71:14]
output io_protocol_0_in_0_d_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_in_0_d_bits_opcode, // @[Protocol.scala:71:14]
output [1:0] io_protocol_0_in_0_d_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_in_0_d_bits_size, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_0_d_bits_source, // @[Protocol.scala:71:14]
output [4:0] io_protocol_0_in_0_d_bits_sink, // @[Protocol.scala:71:14]
output io_protocol_0_in_0_d_bits_denied, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_in_0_d_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_in_0_d_bits_corrupt, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_a_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_a_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_a_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_a_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_a_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_4_a_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_4_a_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_out_4_a_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_4_a_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_a_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_4_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_4_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_4_b_bits_address, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_c_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_c_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_c_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_c_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_c_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_4_c_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_4_c_bits_address, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_4_c_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_c_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_d_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_d_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_4_d_bits_opcode, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_4_d_bits_param, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_4_d_bits_size, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_4_d_bits_source, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_4_d_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_d_bits_denied, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_out_4_d_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_out_4_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_4_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_4_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_a_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_a_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_a_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_a_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_a_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_3_a_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_3_a_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_out_3_a_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_3_a_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_a_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_3_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_3_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_3_b_bits_address, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_c_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_c_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_c_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_c_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_c_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_3_c_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_3_c_bits_address, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_3_c_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_c_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_d_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_d_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_3_d_bits_opcode, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_3_d_bits_param, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_3_d_bits_size, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_3_d_bits_source, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_3_d_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_d_bits_denied, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_out_3_d_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_out_3_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_3_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_3_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_a_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_a_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_a_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_a_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_a_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_2_a_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_2_a_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_out_2_a_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_2_a_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_a_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_2_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_2_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_2_b_bits_address, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_c_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_c_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_c_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_c_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_c_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_2_c_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_2_c_bits_address, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_2_c_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_c_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_d_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_d_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_2_d_bits_opcode, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_2_d_bits_param, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_2_d_bits_size, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_2_d_bits_source, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_2_d_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_d_bits_denied, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_out_2_d_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_out_2_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_2_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_2_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_a_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_a_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_a_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_a_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_a_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_1_a_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_1_a_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_out_1_a_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_1_a_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_a_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_b_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_b_valid, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_1_b_bits_param, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_1_b_bits_source, // @[Protocol.scala:71:14]
input [31:0] io_protocol_0_out_1_b_bits_address, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_c_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_c_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_c_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_c_bits_param, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_c_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_1_c_bits_source, // @[Protocol.scala:71:14]
output [31:0] io_protocol_0_out_1_c_bits_address, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_1_c_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_c_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_d_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_d_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_1_d_bits_opcode, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_1_d_bits_param, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_1_d_bits_size, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_1_d_bits_source, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_1_d_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_d_bits_denied, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_out_1_d_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_out_1_d_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_1_e_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_1_e_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_0_a_ready, // @[Protocol.scala:71:14]
output io_protocol_0_out_0_a_valid, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_0_a_bits_opcode, // @[Protocol.scala:71:14]
output [2:0] io_protocol_0_out_0_a_bits_param, // @[Protocol.scala:71:14]
output [3:0] io_protocol_0_out_0_a_bits_size, // @[Protocol.scala:71:14]
output [5:0] io_protocol_0_out_0_a_bits_source, // @[Protocol.scala:71:14]
output [28:0] io_protocol_0_out_0_a_bits_address, // @[Protocol.scala:71:14]
output [7:0] io_protocol_0_out_0_a_bits_mask, // @[Protocol.scala:71:14]
output [63:0] io_protocol_0_out_0_a_bits_data, // @[Protocol.scala:71:14]
output io_protocol_0_out_0_a_bits_corrupt, // @[Protocol.scala:71:14]
output io_protocol_0_out_0_d_ready, // @[Protocol.scala:71:14]
input io_protocol_0_out_0_d_valid, // @[Protocol.scala:71:14]
input [2:0] io_protocol_0_out_0_d_bits_opcode, // @[Protocol.scala:71:14]
input [1:0] io_protocol_0_out_0_d_bits_param, // @[Protocol.scala:71:14]
input [3:0] io_protocol_0_out_0_d_bits_size, // @[Protocol.scala:71:14]
input [5:0] io_protocol_0_out_0_d_bits_source, // @[Protocol.scala:71:14]
input io_protocol_0_out_0_d_bits_sink, // @[Protocol.scala:71:14]
input io_protocol_0_out_0_d_bits_denied, // @[Protocol.scala:71:14]
input [63:0] io_protocol_0_out_0_d_bits_data, // @[Protocol.scala:71:14]
input io_protocol_0_out_0_d_bits_corrupt // @[Protocol.scala:71:14]
);
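  // Intermediate wires below expose the flit-side outputs of the TileLink network
  // interface converters (nif_slave_* for the slave-side links, nif_master_* for the
  // master-side links) together with the NoC ingress-ready / egress-valid handshakes,
  // so the converters can be hooked to the NoC ingress and egress ports instantiated
  // further down.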
wire [3:0] _nif_slave_4_io_tilelink_a_bits_size; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_4_io_tilelink_c_bits_size; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_4_io_tilelink_e_bits_sink; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_a_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_b_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_b_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_b_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_4_io_flits_b_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_4_io_flits_b_bits_egress_id; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_c_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_d_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_d_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_4_io_flits_d_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_4_io_flits_d_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_4_io_flits_d_bits_egress_id; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_3_io_tilelink_a_bits_size; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_3_io_tilelink_c_bits_size; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_3_io_tilelink_e_bits_sink; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_a_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_b_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_b_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_b_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_3_io_flits_b_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_3_io_flits_b_bits_egress_id; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_c_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_d_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_d_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_3_io_flits_d_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_3_io_flits_d_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_3_io_flits_d_bits_egress_id; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_2_io_tilelink_a_bits_size; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_2_io_tilelink_c_bits_size; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_2_io_tilelink_e_bits_sink; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_a_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_b_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_b_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_b_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_2_io_flits_b_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_2_io_flits_b_bits_egress_id; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_c_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_d_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_d_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_2_io_flits_d_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_2_io_flits_d_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_2_io_flits_d_bits_egress_id; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_1_io_tilelink_a_bits_size; // @[Tilelink.scala:303:31]
wire [3:0] _nif_slave_1_io_tilelink_c_bits_size; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_1_io_tilelink_e_bits_sink; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_a_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_b_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_b_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_b_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_1_io_flits_b_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_1_io_flits_b_bits_egress_id; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_c_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_d_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_d_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_1_io_flits_d_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_1_io_flits_d_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_1_io_flits_d_bits_egress_id; // @[Tilelink.scala:303:31]
wire [31:0] _nif_slave_io_tilelink_a_bits_address; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_a_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_b_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_c_ready; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_d_valid; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_d_bits_head; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_d_bits_tail; // @[Tilelink.scala:303:31]
wire [72:0] _nif_slave_io_flits_d_bits_payload; // @[Tilelink.scala:303:31]
wire [4:0] _nif_slave_io_flits_d_bits_egress_id; // @[Tilelink.scala:303:31]
wire _nif_slave_io_flits_e_ready; // @[Tilelink.scala:303:31]
wire [5:0] _nif_master_8_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_8_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_8_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_8_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_8_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_8_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_8_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_8_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_8_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_7_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_7_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_7_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_7_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_7_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_7_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_7_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_7_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_7_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_6_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_6_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_6_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_6_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_6_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_6_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_6_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_6_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_6_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_5_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_5_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_5_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_5_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_5_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_5_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_5_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_5_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_5_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_4_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_4_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_4_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_4_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_4_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_4_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_4_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_4_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_4_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_3_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_3_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_3_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_3_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_3_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_3_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_3_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_3_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_3_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_2_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_2_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_2_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_2_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_2_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_2_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_2_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_2_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_2_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_1_io_tilelink_b_bits_source; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_1_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_1_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_1_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_c_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_c_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_1_io_flits_c_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_1_io_flits_c_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _nif_master_1_io_flits_e_bits_head; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_1_io_flits_e_bits_payload; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_1_io_flits_e_bits_egress_id; // @[Tilelink.scala:276:32]
wire [5:0] _nif_master_io_tilelink_d_bits_source; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_a_valid; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_a_bits_head; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_a_bits_tail; // @[Tilelink.scala:276:32]
wire [72:0] _nif_master_io_flits_a_bits_payload; // @[Tilelink.scala:276:32]
wire [4:0] _nif_master_io_flits_a_bits_egress_id; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_b_ready; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_c_valid; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_d_ready; // @[Tilelink.scala:276:32]
wire _nif_master_io_flits_e_valid; // @[Tilelink.scala:276:32]
wire _noc_io_ingress_36_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_35_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_34_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_33_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_32_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_31_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_30_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_29_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_28_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_26_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_25_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_24_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_23_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_22_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_21_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_20_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_19_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_18_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_17_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_16_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_15_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_14_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_13_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_12_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_11_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_10_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_9_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_8_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_7_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_6_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_5_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_4_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_3_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_ingress_0_flit_ready; // @[Protocol.scala:116:19]
wire _noc_io_egress_32_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_32_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_32_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_32_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_31_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_31_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_31_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_31_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_30_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_30_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_30_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_30_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_29_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_29_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_29_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_29_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_28_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_28_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_28_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_28_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_27_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_27_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_27_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_27_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_26_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_26_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_26_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_26_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_25_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_25_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_25_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_25_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_24_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_24_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_24_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_24_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_23_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_23_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_23_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_23_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_22_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_22_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_22_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_22_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_21_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_21_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_21_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_21_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_20_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_20_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_20_flit_bits_tail; // @[Protocol.scala:116:19]
wire _noc_io_egress_19_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_19_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_19_flit_bits_tail; // @[Protocol.scala:116:19]
wire _noc_io_egress_18_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_18_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_18_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_18_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_17_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_17_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_17_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_17_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_16_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_16_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_16_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_16_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_15_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_15_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_15_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_15_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_14_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_14_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_14_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_14_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_13_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_13_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_13_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_13_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_12_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_12_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_12_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_12_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_11_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_11_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_11_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_11_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_10_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_10_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_10_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_10_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_9_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_9_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_9_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_9_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_8_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_8_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_8_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_8_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_7_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_7_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_7_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_7_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_6_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_6_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_6_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_6_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_5_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_5_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_5_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_5_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_4_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_4_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_4_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_4_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_3_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_3_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_3_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_3_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_2_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_2_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_2_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_2_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_1_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_1_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_1_flit_bits_tail; // @[Protocol.scala:116:19]
wire [72:0] _noc_io_egress_1_flit_bits_payload; // @[Protocol.scala:116:19]
wire _noc_io_egress_0_flit_valid; // @[Protocol.scala:116:19]
wire _noc_io_egress_0_flit_bits_head; // @[Protocol.scala:116:19]
wire _noc_io_egress_0_flit_bits_tail; // @[Protocol.scala:116:19]
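  // NoC instance: each ingress port carries flits produced by a network interface
  // (B/D responses from the nif_slave_* converters, A/C/E requests from the
  // nif_master_* converters), and each egress port hands flits back to the matching
  // converter, with the converters' ready signals driving the egress handshakes.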
NoC noc ( // @[Protocol.scala:116:19]
.clock (clock),
.reset (reset),
.io_ingress_36_flit_ready (_noc_io_ingress_36_flit_ready),
.io_ingress_36_flit_valid (_nif_slave_4_io_flits_d_valid), // @[Tilelink.scala:303:31]
.io_ingress_36_flit_bits_head (_nif_slave_4_io_flits_d_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_36_flit_bits_tail (_nif_slave_4_io_flits_d_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_36_flit_bits_payload (_nif_slave_4_io_flits_d_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_36_flit_bits_egress_id (_nif_slave_4_io_flits_d_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_35_flit_ready (_noc_io_ingress_35_flit_ready),
.io_ingress_35_flit_valid (_nif_slave_4_io_flits_b_valid), // @[Tilelink.scala:303:31]
.io_ingress_35_flit_bits_head (_nif_slave_4_io_flits_b_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_35_flit_bits_tail (_nif_slave_4_io_flits_b_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_35_flit_bits_payload (_nif_slave_4_io_flits_b_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_35_flit_bits_egress_id (_nif_slave_4_io_flits_b_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_34_flit_ready (_noc_io_ingress_34_flit_ready),
.io_ingress_34_flit_valid (_nif_slave_3_io_flits_d_valid), // @[Tilelink.scala:303:31]
.io_ingress_34_flit_bits_head (_nif_slave_3_io_flits_d_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_34_flit_bits_tail (_nif_slave_3_io_flits_d_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_34_flit_bits_payload (_nif_slave_3_io_flits_d_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_34_flit_bits_egress_id (_nif_slave_3_io_flits_d_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_33_flit_ready (_noc_io_ingress_33_flit_ready),
.io_ingress_33_flit_valid (_nif_slave_3_io_flits_b_valid), // @[Tilelink.scala:303:31]
.io_ingress_33_flit_bits_head (_nif_slave_3_io_flits_b_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_33_flit_bits_tail (_nif_slave_3_io_flits_b_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_33_flit_bits_payload (_nif_slave_3_io_flits_b_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_33_flit_bits_egress_id (_nif_slave_3_io_flits_b_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_32_flit_ready (_noc_io_ingress_32_flit_ready),
.io_ingress_32_flit_valid (_nif_slave_2_io_flits_d_valid), // @[Tilelink.scala:303:31]
.io_ingress_32_flit_bits_head (_nif_slave_2_io_flits_d_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_32_flit_bits_tail (_nif_slave_2_io_flits_d_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_32_flit_bits_payload (_nif_slave_2_io_flits_d_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_32_flit_bits_egress_id (_nif_slave_2_io_flits_d_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_31_flit_ready (_noc_io_ingress_31_flit_ready),
.io_ingress_31_flit_valid (_nif_slave_2_io_flits_b_valid), // @[Tilelink.scala:303:31]
.io_ingress_31_flit_bits_head (_nif_slave_2_io_flits_b_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_31_flit_bits_tail (_nif_slave_2_io_flits_b_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_31_flit_bits_payload (_nif_slave_2_io_flits_b_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_31_flit_bits_egress_id (_nif_slave_2_io_flits_b_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_30_flit_ready (_noc_io_ingress_30_flit_ready),
.io_ingress_30_flit_valid (_nif_slave_1_io_flits_d_valid), // @[Tilelink.scala:303:31]
.io_ingress_30_flit_bits_head (_nif_slave_1_io_flits_d_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_30_flit_bits_tail (_nif_slave_1_io_flits_d_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_30_flit_bits_payload (_nif_slave_1_io_flits_d_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_30_flit_bits_egress_id (_nif_slave_1_io_flits_d_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_29_flit_ready (_noc_io_ingress_29_flit_ready),
.io_ingress_29_flit_valid (_nif_slave_1_io_flits_b_valid), // @[Tilelink.scala:303:31]
.io_ingress_29_flit_bits_head (_nif_slave_1_io_flits_b_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_29_flit_bits_tail (_nif_slave_1_io_flits_b_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_29_flit_bits_payload (_nif_slave_1_io_flits_b_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_29_flit_bits_egress_id (_nif_slave_1_io_flits_b_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_28_flit_ready (_noc_io_ingress_28_flit_ready),
.io_ingress_28_flit_valid (_nif_slave_io_flits_d_valid), // @[Tilelink.scala:303:31]
.io_ingress_28_flit_bits_head (_nif_slave_io_flits_d_bits_head), // @[Tilelink.scala:303:31]
.io_ingress_28_flit_bits_tail (_nif_slave_io_flits_d_bits_tail), // @[Tilelink.scala:303:31]
.io_ingress_28_flit_bits_payload (_nif_slave_io_flits_d_bits_payload), // @[Tilelink.scala:303:31]
.io_ingress_28_flit_bits_egress_id (_nif_slave_io_flits_d_bits_egress_id), // @[Tilelink.scala:303:31]
.io_ingress_27_flit_valid (_nif_slave_io_flits_b_valid), // @[Tilelink.scala:303:31]
.io_ingress_26_flit_ready (_noc_io_ingress_26_flit_ready),
.io_ingress_26_flit_valid (_nif_master_8_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_26_flit_bits_head (_nif_master_8_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_26_flit_bits_payload (_nif_master_8_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_26_flit_bits_egress_id (_nif_master_8_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_25_flit_ready (_noc_io_ingress_25_flit_ready),
.io_ingress_25_flit_valid (_nif_master_8_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_25_flit_bits_head (_nif_master_8_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_25_flit_bits_tail (_nif_master_8_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_25_flit_bits_payload (_nif_master_8_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_25_flit_bits_egress_id (_nif_master_8_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_24_flit_ready (_noc_io_ingress_24_flit_ready),
.io_ingress_24_flit_valid (_nif_master_8_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_24_flit_bits_head (_nif_master_8_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_24_flit_bits_tail (_nif_master_8_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_24_flit_bits_payload (_nif_master_8_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_24_flit_bits_egress_id (_nif_master_8_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_23_flit_ready (_noc_io_ingress_23_flit_ready),
.io_ingress_23_flit_valid (_nif_master_7_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_23_flit_bits_head (_nif_master_7_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_23_flit_bits_payload (_nif_master_7_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_23_flit_bits_egress_id (_nif_master_7_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_22_flit_ready (_noc_io_ingress_22_flit_ready),
.io_ingress_22_flit_valid (_nif_master_7_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_22_flit_bits_head (_nif_master_7_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_22_flit_bits_tail (_nif_master_7_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_22_flit_bits_payload (_nif_master_7_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_22_flit_bits_egress_id (_nif_master_7_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_21_flit_ready (_noc_io_ingress_21_flit_ready),
.io_ingress_21_flit_valid (_nif_master_7_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_21_flit_bits_head (_nif_master_7_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_21_flit_bits_tail (_nif_master_7_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_21_flit_bits_payload (_nif_master_7_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_21_flit_bits_egress_id (_nif_master_7_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_20_flit_ready (_noc_io_ingress_20_flit_ready),
.io_ingress_20_flit_valid (_nif_master_6_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_20_flit_bits_head (_nif_master_6_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_20_flit_bits_payload (_nif_master_6_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_20_flit_bits_egress_id (_nif_master_6_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_19_flit_ready (_noc_io_ingress_19_flit_ready),
.io_ingress_19_flit_valid (_nif_master_6_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_19_flit_bits_head (_nif_master_6_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_19_flit_bits_tail (_nif_master_6_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_19_flit_bits_payload (_nif_master_6_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_19_flit_bits_egress_id (_nif_master_6_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_18_flit_ready (_noc_io_ingress_18_flit_ready),
.io_ingress_18_flit_valid (_nif_master_6_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_18_flit_bits_head (_nif_master_6_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_18_flit_bits_tail (_nif_master_6_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_18_flit_bits_payload (_nif_master_6_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_18_flit_bits_egress_id (_nif_master_6_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_17_flit_ready (_noc_io_ingress_17_flit_ready),
.io_ingress_17_flit_valid (_nif_master_5_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_17_flit_bits_head (_nif_master_5_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_17_flit_bits_payload (_nif_master_5_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_17_flit_bits_egress_id (_nif_master_5_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_16_flit_ready (_noc_io_ingress_16_flit_ready),
.io_ingress_16_flit_valid (_nif_master_5_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_16_flit_bits_head (_nif_master_5_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_16_flit_bits_tail (_nif_master_5_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_16_flit_bits_payload (_nif_master_5_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_16_flit_bits_egress_id (_nif_master_5_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_15_flit_ready (_noc_io_ingress_15_flit_ready),
.io_ingress_15_flit_valid (_nif_master_5_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_15_flit_bits_head (_nif_master_5_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_15_flit_bits_tail (_nif_master_5_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_15_flit_bits_payload (_nif_master_5_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_15_flit_bits_egress_id (_nif_master_5_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_14_flit_ready (_noc_io_ingress_14_flit_ready),
.io_ingress_14_flit_valid (_nif_master_4_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_14_flit_bits_head (_nif_master_4_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_14_flit_bits_payload (_nif_master_4_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_14_flit_bits_egress_id (_nif_master_4_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_13_flit_ready (_noc_io_ingress_13_flit_ready),
.io_ingress_13_flit_valid (_nif_master_4_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_13_flit_bits_head (_nif_master_4_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_13_flit_bits_tail (_nif_master_4_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_13_flit_bits_payload (_nif_master_4_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_13_flit_bits_egress_id (_nif_master_4_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_12_flit_ready (_noc_io_ingress_12_flit_ready),
.io_ingress_12_flit_valid (_nif_master_4_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_12_flit_bits_head (_nif_master_4_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_12_flit_bits_tail (_nif_master_4_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_12_flit_bits_payload (_nif_master_4_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_12_flit_bits_egress_id (_nif_master_4_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_11_flit_ready (_noc_io_ingress_11_flit_ready),
.io_ingress_11_flit_valid (_nif_master_3_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_11_flit_bits_head (_nif_master_3_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_11_flit_bits_payload (_nif_master_3_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_11_flit_bits_egress_id (_nif_master_3_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_10_flit_ready (_noc_io_ingress_10_flit_ready),
.io_ingress_10_flit_valid (_nif_master_3_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_10_flit_bits_head (_nif_master_3_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_10_flit_bits_tail (_nif_master_3_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_10_flit_bits_payload (_nif_master_3_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_10_flit_bits_egress_id (_nif_master_3_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_9_flit_ready (_noc_io_ingress_9_flit_ready),
.io_ingress_9_flit_valid (_nif_master_3_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_9_flit_bits_head (_nif_master_3_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_9_flit_bits_tail (_nif_master_3_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_9_flit_bits_payload (_nif_master_3_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_9_flit_bits_egress_id (_nif_master_3_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_8_flit_ready (_noc_io_ingress_8_flit_ready),
.io_ingress_8_flit_valid (_nif_master_2_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_8_flit_bits_head (_nif_master_2_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_8_flit_bits_payload (_nif_master_2_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_8_flit_bits_egress_id (_nif_master_2_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_7_flit_ready (_noc_io_ingress_7_flit_ready),
.io_ingress_7_flit_valid (_nif_master_2_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_7_flit_bits_head (_nif_master_2_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_7_flit_bits_tail (_nif_master_2_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_7_flit_bits_payload (_nif_master_2_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_7_flit_bits_egress_id (_nif_master_2_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_6_flit_ready (_noc_io_ingress_6_flit_ready),
.io_ingress_6_flit_valid (_nif_master_2_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_6_flit_bits_head (_nif_master_2_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_6_flit_bits_tail (_nif_master_2_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_6_flit_bits_payload (_nif_master_2_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_6_flit_bits_egress_id (_nif_master_2_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_5_flit_ready (_noc_io_ingress_5_flit_ready),
.io_ingress_5_flit_valid (_nif_master_1_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_5_flit_bits_head (_nif_master_1_io_flits_e_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_5_flit_bits_payload (_nif_master_1_io_flits_e_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_5_flit_bits_egress_id (_nif_master_1_io_flits_e_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_4_flit_ready (_noc_io_ingress_4_flit_ready),
.io_ingress_4_flit_valid (_nif_master_1_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_4_flit_bits_head (_nif_master_1_io_flits_c_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_4_flit_bits_tail (_nif_master_1_io_flits_c_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_4_flit_bits_payload (_nif_master_1_io_flits_c_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_4_flit_bits_egress_id (_nif_master_1_io_flits_c_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_3_flit_ready (_noc_io_ingress_3_flit_ready),
.io_ingress_3_flit_valid (_nif_master_1_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_3_flit_bits_head (_nif_master_1_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_3_flit_bits_tail (_nif_master_1_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_3_flit_bits_payload (_nif_master_1_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_3_flit_bits_egress_id (_nif_master_1_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_ingress_2_flit_valid (_nif_master_io_flits_e_valid), // @[Tilelink.scala:276:32]
.io_ingress_1_flit_valid (_nif_master_io_flits_c_valid), // @[Tilelink.scala:276:32]
.io_ingress_0_flit_ready (_noc_io_ingress_0_flit_ready),
.io_ingress_0_flit_valid (_nif_master_io_flits_a_valid), // @[Tilelink.scala:276:32]
.io_ingress_0_flit_bits_head (_nif_master_io_flits_a_bits_head), // @[Tilelink.scala:276:32]
.io_ingress_0_flit_bits_tail (_nif_master_io_flits_a_bits_tail), // @[Tilelink.scala:276:32]
.io_ingress_0_flit_bits_payload (_nif_master_io_flits_a_bits_payload), // @[Tilelink.scala:276:32]
.io_ingress_0_flit_bits_egress_id (_nif_master_io_flits_a_bits_egress_id), // @[Tilelink.scala:276:32]
.io_egress_32_flit_valid (_noc_io_egress_32_flit_valid),
.io_egress_32_flit_bits_head (_noc_io_egress_32_flit_bits_head),
.io_egress_32_flit_bits_tail (_noc_io_egress_32_flit_bits_tail),
.io_egress_32_flit_bits_payload (_noc_io_egress_32_flit_bits_payload),
.io_egress_31_flit_ready (_nif_slave_4_io_flits_c_ready), // @[Tilelink.scala:303:31]
.io_egress_31_flit_valid (_noc_io_egress_31_flit_valid),
.io_egress_31_flit_bits_head (_noc_io_egress_31_flit_bits_head),
.io_egress_31_flit_bits_tail (_noc_io_egress_31_flit_bits_tail),
.io_egress_31_flit_bits_payload (_noc_io_egress_31_flit_bits_payload),
.io_egress_30_flit_ready (_nif_slave_4_io_flits_a_ready), // @[Tilelink.scala:303:31]
.io_egress_30_flit_valid (_noc_io_egress_30_flit_valid),
.io_egress_30_flit_bits_head (_noc_io_egress_30_flit_bits_head),
.io_egress_30_flit_bits_tail (_noc_io_egress_30_flit_bits_tail),
.io_egress_30_flit_bits_payload (_noc_io_egress_30_flit_bits_payload),
.io_egress_29_flit_valid (_noc_io_egress_29_flit_valid),
.io_egress_29_flit_bits_head (_noc_io_egress_29_flit_bits_head),
.io_egress_29_flit_bits_tail (_noc_io_egress_29_flit_bits_tail),
.io_egress_29_flit_bits_payload (_noc_io_egress_29_flit_bits_payload),
.io_egress_28_flit_ready (_nif_slave_3_io_flits_c_ready), // @[Tilelink.scala:303:31]
.io_egress_28_flit_valid (_noc_io_egress_28_flit_valid),
.io_egress_28_flit_bits_head (_noc_io_egress_28_flit_bits_head),
.io_egress_28_flit_bits_tail (_noc_io_egress_28_flit_bits_tail),
.io_egress_28_flit_bits_payload (_noc_io_egress_28_flit_bits_payload),
.io_egress_27_flit_ready (_nif_slave_3_io_flits_a_ready), // @[Tilelink.scala:303:31]
.io_egress_27_flit_valid (_noc_io_egress_27_flit_valid),
.io_egress_27_flit_bits_head (_noc_io_egress_27_flit_bits_head),
.io_egress_27_flit_bits_tail (_noc_io_egress_27_flit_bits_tail),
.io_egress_27_flit_bits_payload (_noc_io_egress_27_flit_bits_payload),
.io_egress_26_flit_valid (_noc_io_egress_26_flit_valid),
.io_egress_26_flit_bits_head (_noc_io_egress_26_flit_bits_head),
.io_egress_26_flit_bits_tail (_noc_io_egress_26_flit_bits_tail),
.io_egress_26_flit_bits_payload (_noc_io_egress_26_flit_bits_payload),
.io_egress_25_flit_ready (_nif_slave_2_io_flits_c_ready), // @[Tilelink.scala:303:31]
.io_egress_25_flit_valid (_noc_io_egress_25_flit_valid),
.io_egress_25_flit_bits_head (_noc_io_egress_25_flit_bits_head),
.io_egress_25_flit_bits_tail (_noc_io_egress_25_flit_bits_tail),
.io_egress_25_flit_bits_payload (_noc_io_egress_25_flit_bits_payload),
.io_egress_24_flit_ready (_nif_slave_2_io_flits_a_ready), // @[Tilelink.scala:303:31]
.io_egress_24_flit_valid (_noc_io_egress_24_flit_valid),
.io_egress_24_flit_bits_head (_noc_io_egress_24_flit_bits_head),
.io_egress_24_flit_bits_tail (_noc_io_egress_24_flit_bits_tail),
.io_egress_24_flit_bits_payload (_noc_io_egress_24_flit_bits_payload),
.io_egress_23_flit_valid (_noc_io_egress_23_flit_valid),
.io_egress_23_flit_bits_head (_noc_io_egress_23_flit_bits_head),
.io_egress_23_flit_bits_tail (_noc_io_egress_23_flit_bits_tail),
.io_egress_23_flit_bits_payload (_noc_io_egress_23_flit_bits_payload),
.io_egress_22_flit_ready (_nif_slave_1_io_flits_c_ready), // @[Tilelink.scala:303:31]
.io_egress_22_flit_valid (_noc_io_egress_22_flit_valid),
.io_egress_22_flit_bits_head (_noc_io_egress_22_flit_bits_head),
.io_egress_22_flit_bits_tail (_noc_io_egress_22_flit_bits_tail),
.io_egress_22_flit_bits_payload (_noc_io_egress_22_flit_bits_payload),
.io_egress_21_flit_ready (_nif_slave_1_io_flits_a_ready), // @[Tilelink.scala:303:31]
.io_egress_21_flit_valid (_noc_io_egress_21_flit_valid),
.io_egress_21_flit_bits_head (_noc_io_egress_21_flit_bits_head),
.io_egress_21_flit_bits_tail (_noc_io_egress_21_flit_bits_tail),
.io_egress_21_flit_bits_payload (_noc_io_egress_21_flit_bits_payload),
.io_egress_20_flit_ready (_nif_slave_io_flits_e_ready), // @[Tilelink.scala:303:31]
.io_egress_20_flit_valid (_noc_io_egress_20_flit_valid),
.io_egress_20_flit_bits_head (_noc_io_egress_20_flit_bits_head),
.io_egress_20_flit_bits_tail (_noc_io_egress_20_flit_bits_tail),
.io_egress_19_flit_ready (_nif_slave_io_flits_c_ready), // @[Tilelink.scala:303:31]
.io_egress_19_flit_valid (_noc_io_egress_19_flit_valid),
.io_egress_19_flit_bits_head (_noc_io_egress_19_flit_bits_head),
.io_egress_19_flit_bits_tail (_noc_io_egress_19_flit_bits_tail),
.io_egress_18_flit_ready (_nif_slave_io_flits_a_ready), // @[Tilelink.scala:303:31]
.io_egress_18_flit_valid (_noc_io_egress_18_flit_valid),
.io_egress_18_flit_bits_head (_noc_io_egress_18_flit_bits_head),
.io_egress_18_flit_bits_tail (_noc_io_egress_18_flit_bits_tail),
.io_egress_18_flit_bits_payload (_noc_io_egress_18_flit_bits_payload),
.io_egress_17_flit_ready (_nif_master_8_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_17_flit_valid (_noc_io_egress_17_flit_valid),
.io_egress_17_flit_bits_head (_noc_io_egress_17_flit_bits_head),
.io_egress_17_flit_bits_tail (_noc_io_egress_17_flit_bits_tail),
.io_egress_17_flit_bits_payload (_noc_io_egress_17_flit_bits_payload),
.io_egress_16_flit_ready (_nif_master_8_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_16_flit_valid (_noc_io_egress_16_flit_valid),
.io_egress_16_flit_bits_head (_noc_io_egress_16_flit_bits_head),
.io_egress_16_flit_bits_tail (_noc_io_egress_16_flit_bits_tail),
.io_egress_16_flit_bits_payload (_noc_io_egress_16_flit_bits_payload),
.io_egress_15_flit_ready (_nif_master_7_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_15_flit_valid (_noc_io_egress_15_flit_valid),
.io_egress_15_flit_bits_head (_noc_io_egress_15_flit_bits_head),
.io_egress_15_flit_bits_tail (_noc_io_egress_15_flit_bits_tail),
.io_egress_15_flit_bits_payload (_noc_io_egress_15_flit_bits_payload),
.io_egress_14_flit_ready (_nif_master_7_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_14_flit_valid (_noc_io_egress_14_flit_valid),
.io_egress_14_flit_bits_head (_noc_io_egress_14_flit_bits_head),
.io_egress_14_flit_bits_tail (_noc_io_egress_14_flit_bits_tail),
.io_egress_14_flit_bits_payload (_noc_io_egress_14_flit_bits_payload),
.io_egress_13_flit_ready (_nif_master_6_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_13_flit_valid (_noc_io_egress_13_flit_valid),
.io_egress_13_flit_bits_head (_noc_io_egress_13_flit_bits_head),
.io_egress_13_flit_bits_tail (_noc_io_egress_13_flit_bits_tail),
.io_egress_13_flit_bits_payload (_noc_io_egress_13_flit_bits_payload),
.io_egress_12_flit_ready (_nif_master_6_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_12_flit_valid (_noc_io_egress_12_flit_valid),
.io_egress_12_flit_bits_head (_noc_io_egress_12_flit_bits_head),
.io_egress_12_flit_bits_tail (_noc_io_egress_12_flit_bits_tail),
.io_egress_12_flit_bits_payload (_noc_io_egress_12_flit_bits_payload),
.io_egress_11_flit_ready (_nif_master_5_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_11_flit_valid (_noc_io_egress_11_flit_valid),
.io_egress_11_flit_bits_head (_noc_io_egress_11_flit_bits_head),
.io_egress_11_flit_bits_tail (_noc_io_egress_11_flit_bits_tail),
.io_egress_11_flit_bits_payload (_noc_io_egress_11_flit_bits_payload),
.io_egress_10_flit_ready (_nif_master_5_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_10_flit_valid (_noc_io_egress_10_flit_valid),
.io_egress_10_flit_bits_head (_noc_io_egress_10_flit_bits_head),
.io_egress_10_flit_bits_tail (_noc_io_egress_10_flit_bits_tail),
.io_egress_10_flit_bits_payload (_noc_io_egress_10_flit_bits_payload),
.io_egress_9_flit_ready (_nif_master_4_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_9_flit_valid (_noc_io_egress_9_flit_valid),
.io_egress_9_flit_bits_head (_noc_io_egress_9_flit_bits_head),
.io_egress_9_flit_bits_tail (_noc_io_egress_9_flit_bits_tail),
.io_egress_9_flit_bits_payload (_noc_io_egress_9_flit_bits_payload),
.io_egress_8_flit_ready (_nif_master_4_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_8_flit_valid (_noc_io_egress_8_flit_valid),
.io_egress_8_flit_bits_head (_noc_io_egress_8_flit_bits_head),
.io_egress_8_flit_bits_tail (_noc_io_egress_8_flit_bits_tail),
.io_egress_8_flit_bits_payload (_noc_io_egress_8_flit_bits_payload),
.io_egress_7_flit_ready (_nif_master_3_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_7_flit_valid (_noc_io_egress_7_flit_valid),
.io_egress_7_flit_bits_head (_noc_io_egress_7_flit_bits_head),
.io_egress_7_flit_bits_tail (_noc_io_egress_7_flit_bits_tail),
.io_egress_7_flit_bits_payload (_noc_io_egress_7_flit_bits_payload),
.io_egress_6_flit_ready (_nif_master_3_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_6_flit_valid (_noc_io_egress_6_flit_valid),
.io_egress_6_flit_bits_head (_noc_io_egress_6_flit_bits_head),
.io_egress_6_flit_bits_tail (_noc_io_egress_6_flit_bits_tail),
.io_egress_6_flit_bits_payload (_noc_io_egress_6_flit_bits_payload),
.io_egress_5_flit_ready (_nif_master_2_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_5_flit_valid (_noc_io_egress_5_flit_valid),
.io_egress_5_flit_bits_head (_noc_io_egress_5_flit_bits_head),
.io_egress_5_flit_bits_tail (_noc_io_egress_5_flit_bits_tail),
.io_egress_5_flit_bits_payload (_noc_io_egress_5_flit_bits_payload),
.io_egress_4_flit_ready (_nif_master_2_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_4_flit_valid (_noc_io_egress_4_flit_valid),
.io_egress_4_flit_bits_head (_noc_io_egress_4_flit_bits_head),
.io_egress_4_flit_bits_tail (_noc_io_egress_4_flit_bits_tail),
.io_egress_4_flit_bits_payload (_noc_io_egress_4_flit_bits_payload),
.io_egress_3_flit_ready (_nif_master_1_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_3_flit_valid (_noc_io_egress_3_flit_valid),
.io_egress_3_flit_bits_head (_noc_io_egress_3_flit_bits_head),
.io_egress_3_flit_bits_tail (_noc_io_egress_3_flit_bits_tail),
.io_egress_3_flit_bits_payload (_noc_io_egress_3_flit_bits_payload),
.io_egress_2_flit_ready (_nif_master_1_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_2_flit_valid (_noc_io_egress_2_flit_valid),
.io_egress_2_flit_bits_head (_noc_io_egress_2_flit_bits_head),
.io_egress_2_flit_bits_tail (_noc_io_egress_2_flit_bits_tail),
.io_egress_2_flit_bits_payload (_noc_io_egress_2_flit_bits_payload),
.io_egress_1_flit_ready (_nif_master_io_flits_d_ready), // @[Tilelink.scala:276:32]
.io_egress_1_flit_valid (_noc_io_egress_1_flit_valid),
.io_egress_1_flit_bits_head (_noc_io_egress_1_flit_bits_head),
.io_egress_1_flit_bits_tail (_noc_io_egress_1_flit_bits_tail),
.io_egress_1_flit_bits_payload (_noc_io_egress_1_flit_bits_payload),
.io_egress_0_flit_ready (_nif_master_io_flits_b_ready), // @[Tilelink.scala:276:32]
.io_egress_0_flit_valid (_noc_io_egress_0_flit_valid),
.io_egress_0_flit_bits_head (_noc_io_egress_0_flit_bits_head),
.io_egress_0_flit_bits_tail (_noc_io_egress_0_flit_bits_tail),
.io_router_clocks_0_clock (clock),
.io_router_clocks_0_reset (reset),
.io_router_clocks_1_clock (clock),
.io_router_clocks_1_reset (reset),
.io_router_clocks_2_clock (clock),
.io_router_clocks_2_reset (reset),
.io_router_clocks_3_clock (clock),
.io_router_clocks_3_reset (reset),
.io_router_clocks_4_clock (clock),
.io_router_clocks_4_reset (reset),
.io_router_clocks_5_clock (clock),
.io_router_clocks_5_reset (reset),
.io_router_clocks_6_clock (clock),
.io_router_clocks_6_reset (reset),
.io_router_clocks_7_clock (clock),
.io_router_clocks_7_reset (reset),
.io_router_clocks_8_clock (clock),
.io_router_clocks_8_reset (reset),
.io_router_clocks_9_clock (clock),
.io_router_clocks_9_reset (reset),
.io_router_clocks_10_clock (clock),
.io_router_clocks_10_reset (reset),
.io_router_clocks_11_clock (clock),
.io_router_clocks_11_reset (reset),
.io_router_clocks_13_clock (clock),
.io_router_clocks_13_reset (reset),
.io_router_clocks_14_clock (clock),
.io_router_clocks_14_reset (reset),
.io_router_clocks_16_clock (clock),
.io_router_clocks_16_reset (reset),
.io_router_clocks_17_clock (clock),
.io_router_clocks_17_reset (reset),
.io_router_clocks_18_clock (clock),
.io_router_clocks_18_reset (reset),
.io_router_clocks_19_clock (clock),
.io_router_clocks_19_reset (reset),
.io_router_clocks_20_clock (clock),
.io_router_clocks_20_reset (reset),
.io_router_clocks_21_clock (clock),
.io_router_clocks_21_reset (reset),
.io_router_clocks_22_clock (clock),
.io_router_clocks_22_reset (reset),
.io_router_clocks_23_clock (clock),
.io_router_clocks_23_reset (reset),
.io_router_clocks_24_clock (clock),
.io_router_clocks_24_reset (reset),
.io_router_clocks_25_clock (clock),
.io_router_clocks_25_reset (reset),
.io_router_clocks_26_clock (clock),
.io_router_clocks_26_reset (reset),
.io_router_clocks_27_clock (clock),
.io_router_clocks_27_reset (reset),
.io_router_clocks_29_clock (clock),
.io_router_clocks_29_reset (reset),
.io_router_clocks_30_clock (clock),
.io_router_clocks_30_reset (reset),
.io_router_clocks_31_clock (clock),
.io_router_clocks_31_reset (reset)
); // @[Protocol.scala:116:19]
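  // Master-side network interfaces: one TLMasterToNoC per TileLink master port
  // (io_protocol_0_in_0..8), bridging request channels onto NoC ingress flit ports
  // and response channels off NoC egress flit ports.
  // nif_master (port 0, A/D-only on the TileLink side): A -> ingress 0, D <- egress 1;
  // its B/C/E flit ports map to egress 0 and ingress 1/2.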
TLMasterToNoC nif_master ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_0_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_0_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_0_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_0_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_0_a_bits_size),
.io_tilelink_a_bits_source ({1'h0, io_protocol_0_in_0_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_0_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_0_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_0_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_0_a_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_0_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_0_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_0_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_0_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_0_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_0_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_0_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_0_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_0_d_bits_corrupt),
.io_flits_a_ready (_noc_io_ingress_0_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_0_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_0_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_0_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_io_flits_c_valid),
.io_flits_d_ready (_nif_master_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_1_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_1_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_1_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_1_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_io_flits_e_valid)
); // @[Tilelink.scala:276:32]
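  // nif_master_1 (port 1): A/C/E -> ingress 3/4/5, B/D <- egress 2/3.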
TLMasterToNoC_1 nif_master_1 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_1_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_1_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_1_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_1_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_1_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_1_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_1_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_1_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_1_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_1_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_1_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_1_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_1_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_1_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_1_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_1_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_1_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_1_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_1_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_1_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_1_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_1_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_1_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_1_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_1_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_1_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_1_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_1_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_1_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_1_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_1_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_1_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_1_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_1_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_1_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_1_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_1_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_1_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_1_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_1_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_1_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_1_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_3_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_1_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_1_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_1_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_1_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_1_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_1_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_2_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_2_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_2_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_2_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_4_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_1_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_1_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_1_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_1_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_1_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_1_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_3_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_3_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_3_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_3_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_5_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_1_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_1_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_1_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_1_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
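  // nif_master_2 (port 2): A/C/E -> ingress 6/7/8, B/D <- egress 4/5.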
TLMasterToNoC_2 nif_master_2 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_2_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_2_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_2_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_2_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_2_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_2_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_2_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_2_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_2_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_2_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_2_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_2_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_2_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_2_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_2_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_2_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_2_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_2_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_2_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_2_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_2_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_2_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_2_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_2_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_2_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_2_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_2_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_2_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_2_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_2_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_2_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_2_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_2_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_2_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_2_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_2_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_2_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_2_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_2_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_2_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_2_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_2_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_6_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_2_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_2_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_2_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_2_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_2_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_2_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_4_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_4_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_4_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_4_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_7_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_2_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_2_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_2_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_2_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_2_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_2_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_5_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_5_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_5_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_5_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_8_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_2_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_2_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_2_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_2_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
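  // nif_master_3 (port 3): A/C/E -> ingress 9/10/11, B/D <- egress 6/7.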
TLMasterToNoC_3 nif_master_3 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_3_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_3_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_3_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_3_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_3_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_3_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_3_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_3_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_3_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_3_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_3_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_3_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_3_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_3_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_3_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_3_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_3_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_3_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_3_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_3_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_3_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_3_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_3_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_3_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_3_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_3_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_3_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_3_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_3_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_3_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_3_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_3_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_3_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_3_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_3_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_3_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_3_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_3_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_3_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_3_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_3_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_3_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_9_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_3_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_3_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_3_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_3_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_3_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_3_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_6_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_6_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_6_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_6_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_10_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_3_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_3_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_3_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_3_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_3_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_3_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_7_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_7_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_7_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_7_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_11_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_3_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_3_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_3_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_3_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
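  // nif_master_4 (port 4): A/C/E -> ingress 12/13/14, B/D <- egress 8/9.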
TLMasterToNoC_4 nif_master_4 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_4_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_4_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_4_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_4_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_4_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_4_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_4_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_4_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_4_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_4_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_4_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_4_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_4_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_4_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_4_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_4_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_4_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_4_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_4_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_4_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_4_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_4_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_4_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_4_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_4_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_4_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_4_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_4_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_4_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_4_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_4_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_4_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_4_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_4_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_4_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_4_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_4_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_4_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_4_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_4_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_4_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_4_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_12_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_4_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_4_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_4_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_4_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_4_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_4_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_8_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_8_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_8_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_8_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_13_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_4_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_4_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_4_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_4_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_4_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_4_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_9_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_9_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_9_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_9_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_14_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_4_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_4_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_4_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_4_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
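  // nif_master_5 (port 5): A/C/E -> ingress 15/16/17, B/D <- egress 10/11.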
TLMasterToNoC_5 nif_master_5 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_5_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_5_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_5_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_5_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_5_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_5_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_5_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_5_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_5_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_5_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_5_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_5_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_5_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_5_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_5_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_5_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_5_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_5_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_5_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_5_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_5_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_5_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_5_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_5_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_5_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_5_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_5_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_5_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_5_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_5_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_5_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_5_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_5_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_5_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_5_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_5_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_5_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_5_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_5_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_5_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_5_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_5_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_15_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_5_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_5_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_5_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_5_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_5_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_5_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_10_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_10_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_10_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_10_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_16_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_5_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_5_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_5_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_5_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_5_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_5_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_11_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_11_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_11_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_11_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_17_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_5_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_5_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_5_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_5_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
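  // nif_master_6 (port 6): A/C/E -> ingress 18/19/20, B/D <- egress 12/13.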
TLMasterToNoC_6 nif_master_6 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_6_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_6_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_6_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_6_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_6_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_6_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_6_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_6_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_6_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_6_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_6_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_6_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_6_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_6_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_6_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_6_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_6_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_6_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_6_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_6_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_6_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_6_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_6_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_6_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_6_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_6_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_6_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_6_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_6_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_6_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_6_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_6_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_6_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_6_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_6_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_6_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_6_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_6_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_6_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_6_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_6_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_6_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_18_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_6_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_6_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_6_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_6_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_6_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_6_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_12_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_12_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_12_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_12_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_19_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_6_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_6_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_6_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_6_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_6_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_6_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_13_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_13_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_13_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_13_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_20_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_6_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_6_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_6_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_6_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
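  // nif_master_7 (port 7): A/C/E -> ingress 21/22/23, B/D <- egress 14/15.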
TLMasterToNoC_7 nif_master_7 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_7_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_7_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_7_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_7_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_7_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_7_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_7_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_7_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_7_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_7_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_7_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_7_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_7_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_7_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_7_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_7_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_7_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_7_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_7_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_7_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_7_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_7_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_7_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_7_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_7_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_7_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_7_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_7_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_7_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_7_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_7_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_7_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_7_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_7_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_7_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_7_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_7_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_7_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_7_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_7_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_7_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_7_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_21_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_7_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_7_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_7_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_7_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_7_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_7_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_14_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_14_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_14_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_14_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_22_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_7_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_7_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_7_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_7_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_7_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_7_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_15_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_15_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_15_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_15_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_23_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_7_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_7_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_7_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_7_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
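  // nif_master_8 (port 8): A/C/E -> ingress 24/25/26, B/D <- egress 16/17.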
TLMasterToNoC_8 nif_master_8 ( // @[Tilelink.scala:276:32]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_in_8_a_ready),
.io_tilelink_a_valid (io_protocol_0_in_8_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_in_8_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_in_8_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_in_8_a_bits_size),
.io_tilelink_a_bits_source ({4'h0, io_protocol_0_in_8_a_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_a_bits_address (io_protocol_0_in_8_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_in_8_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_in_8_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_in_8_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_in_8_b_ready),
.io_tilelink_b_valid (io_protocol_0_in_8_b_valid),
.io_tilelink_b_bits_opcode (io_protocol_0_in_8_b_bits_opcode),
.io_tilelink_b_bits_param (io_protocol_0_in_8_b_bits_param),
.io_tilelink_b_bits_size (io_protocol_0_in_8_b_bits_size),
.io_tilelink_b_bits_source (_nif_master_8_io_tilelink_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_in_8_b_bits_address),
.io_tilelink_b_bits_mask (io_protocol_0_in_8_b_bits_mask),
.io_tilelink_b_bits_data (io_protocol_0_in_8_b_bits_data),
.io_tilelink_b_bits_corrupt (io_protocol_0_in_8_b_bits_corrupt),
.io_tilelink_c_ready (io_protocol_0_in_8_c_ready),
.io_tilelink_c_valid (io_protocol_0_in_8_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_in_8_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_in_8_c_bits_param),
.io_tilelink_c_bits_size (io_protocol_0_in_8_c_bits_size),
.io_tilelink_c_bits_source ({4'h0, io_protocol_0_in_8_c_bits_source}), // @[Tilelink.scala:238:32]
.io_tilelink_c_bits_address (io_protocol_0_in_8_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_in_8_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_in_8_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_in_8_d_ready),
.io_tilelink_d_valid (io_protocol_0_in_8_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_in_8_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_in_8_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_in_8_d_bits_size),
.io_tilelink_d_bits_source (_nif_master_8_io_tilelink_d_bits_source),
.io_tilelink_d_bits_sink (io_protocol_0_in_8_d_bits_sink),
.io_tilelink_d_bits_denied (io_protocol_0_in_8_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_in_8_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_in_8_d_bits_corrupt),
.io_tilelink_e_ready (io_protocol_0_in_8_e_ready),
.io_tilelink_e_valid (io_protocol_0_in_8_e_valid),
.io_tilelink_e_bits_sink (io_protocol_0_in_8_e_bits_sink),
.io_flits_a_ready (_noc_io_ingress_24_flit_ready), // @[Protocol.scala:116:19]
.io_flits_a_valid (_nif_master_8_io_flits_a_valid),
.io_flits_a_bits_head (_nif_master_8_io_flits_a_bits_head),
.io_flits_a_bits_tail (_nif_master_8_io_flits_a_bits_tail),
.io_flits_a_bits_payload (_nif_master_8_io_flits_a_bits_payload),
.io_flits_a_bits_egress_id (_nif_master_8_io_flits_a_bits_egress_id),
.io_flits_b_ready (_nif_master_8_io_flits_b_ready),
.io_flits_b_valid (_noc_io_egress_16_flit_valid), // @[Protocol.scala:116:19]
.io_flits_b_bits_head (_noc_io_egress_16_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_b_bits_tail (_noc_io_egress_16_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_b_bits_payload (_noc_io_egress_16_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_c_ready (_noc_io_ingress_25_flit_ready), // @[Protocol.scala:116:19]
.io_flits_c_valid (_nif_master_8_io_flits_c_valid),
.io_flits_c_bits_head (_nif_master_8_io_flits_c_bits_head),
.io_flits_c_bits_tail (_nif_master_8_io_flits_c_bits_tail),
.io_flits_c_bits_payload (_nif_master_8_io_flits_c_bits_payload),
.io_flits_c_bits_egress_id (_nif_master_8_io_flits_c_bits_egress_id),
.io_flits_d_ready (_nif_master_8_io_flits_d_ready),
.io_flits_d_valid (_noc_io_egress_17_flit_valid), // @[Protocol.scala:116:19]
.io_flits_d_bits_head (_noc_io_egress_17_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_d_bits_tail (_noc_io_egress_17_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_bits_payload (_noc_io_egress_17_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_e_ready (_noc_io_ingress_26_flit_ready), // @[Protocol.scala:116:19]
.io_flits_e_valid (_nif_master_8_io_flits_e_valid),
.io_flits_e_bits_head (_nif_master_8_io_flits_e_bits_head),
.io_flits_e_bits_payload (_nif_master_8_io_flits_e_bits_payload),
.io_flits_e_bits_egress_id (_nif_master_8_io_flits_e_bits_egress_id)
); // @[Tilelink.scala:276:32]
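  // Slave-side network interfaces: one TLSlaveToNoC per TileLink manager port
  // (io_protocol_0_out_0..2), receiving A/C/E flits from NoC egress ports and
  // driving B/D flits into NoC ingress ports.
  // nif_slave (port 0, A/D-only on the TileLink side): A <- egress 18, C <- egress 19,
  // E <- egress 20, D -> ingress 28.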
TLSlaveToNoC nif_slave ( // @[Tilelink.scala:303:31]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_out_0_a_ready),
.io_tilelink_a_valid (io_protocol_0_out_0_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_out_0_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_out_0_a_bits_param),
.io_tilelink_a_bits_size (io_protocol_0_out_0_a_bits_size),
.io_tilelink_a_bits_source (io_protocol_0_out_0_a_bits_source),
.io_tilelink_a_bits_address (_nif_slave_io_tilelink_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_out_0_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_out_0_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_out_0_a_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_out_0_d_ready),
.io_tilelink_d_valid (io_protocol_0_out_0_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_out_0_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_out_0_d_bits_param),
.io_tilelink_d_bits_size (io_protocol_0_out_0_d_bits_size),
.io_tilelink_d_bits_source (io_protocol_0_out_0_d_bits_source),
.io_tilelink_d_bits_sink ({4'h0, io_protocol_0_out_0_d_bits_sink}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_denied (io_protocol_0_out_0_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_out_0_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_out_0_d_bits_corrupt),
.io_flits_a_ready (_nif_slave_io_flits_a_ready),
.io_flits_a_valid (_noc_io_egress_18_flit_valid), // @[Protocol.scala:116:19]
.io_flits_a_bits_head (_noc_io_egress_18_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_a_bits_tail (_noc_io_egress_18_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_a_bits_payload (_noc_io_egress_18_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_io_flits_b_valid),
.io_flits_c_ready (_nif_slave_io_flits_c_ready),
.io_flits_c_valid (_noc_io_egress_19_flit_valid), // @[Protocol.scala:116:19]
.io_flits_c_bits_head (_noc_io_egress_19_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_c_bits_tail (_noc_io_egress_19_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_d_ready (_noc_io_ingress_28_flit_ready), // @[Protocol.scala:116:19]
.io_flits_d_valid (_nif_slave_io_flits_d_valid),
.io_flits_d_bits_head (_nif_slave_io_flits_d_bits_head),
.io_flits_d_bits_tail (_nif_slave_io_flits_d_bits_tail),
.io_flits_d_bits_payload (_nif_slave_io_flits_d_bits_payload),
.io_flits_d_bits_egress_id (_nif_slave_io_flits_d_bits_egress_id),
.io_flits_e_ready (_nif_slave_io_flits_e_ready),
.io_flits_e_valid (_noc_io_egress_20_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_20_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_20_flit_bits_tail) // @[Protocol.scala:116:19]
); // @[Tilelink.scala:303:31]
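  // nif_slave_1 (port 1): A/C/E <- egress 21/22/23, B/D -> ingress 29/30.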
TLSlaveToNoC_1 nif_slave_1 ( // @[Tilelink.scala:303:31]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_out_1_a_ready),
.io_tilelink_a_valid (io_protocol_0_out_1_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_out_1_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_out_1_a_bits_param),
.io_tilelink_a_bits_size (_nif_slave_1_io_tilelink_a_bits_size),
.io_tilelink_a_bits_source (io_protocol_0_out_1_a_bits_source),
.io_tilelink_a_bits_address (io_protocol_0_out_1_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_out_1_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_out_1_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_out_1_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_out_1_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_1_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_1_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_1_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_1_b_bits_address),
.io_tilelink_c_ready (io_protocol_0_out_1_c_ready),
.io_tilelink_c_valid (io_protocol_0_out_1_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_out_1_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_out_1_c_bits_param),
.io_tilelink_c_bits_size (_nif_slave_1_io_tilelink_c_bits_size),
.io_tilelink_c_bits_source (io_protocol_0_out_1_c_bits_source),
.io_tilelink_c_bits_address (io_protocol_0_out_1_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_out_1_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_out_1_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_out_1_d_ready),
.io_tilelink_d_valid (io_protocol_0_out_1_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_out_1_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_out_1_d_bits_param),
.io_tilelink_d_bits_size ({1'h0, io_protocol_0_out_1_d_bits_size}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_source (io_protocol_0_out_1_d_bits_source),
.io_tilelink_d_bits_sink ({2'h0, io_protocol_0_out_1_d_bits_sink}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_denied (io_protocol_0_out_1_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_out_1_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_out_1_d_bits_corrupt),
.io_tilelink_e_valid (io_protocol_0_out_1_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_1_io_tilelink_e_bits_sink),
.io_flits_a_ready (_nif_slave_1_io_flits_a_ready),
.io_flits_a_valid (_noc_io_egress_21_flit_valid), // @[Protocol.scala:116:19]
.io_flits_a_bits_head (_noc_io_egress_21_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_a_bits_tail (_noc_io_egress_21_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_a_bits_payload (_noc_io_egress_21_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_b_ready (_noc_io_ingress_29_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_1_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_1_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_1_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_1_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_1_io_flits_b_bits_egress_id),
.io_flits_c_ready (_nif_slave_1_io_flits_c_ready),
.io_flits_c_valid (_noc_io_egress_22_flit_valid), // @[Protocol.scala:116:19]
.io_flits_c_bits_head (_noc_io_egress_22_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_c_bits_tail (_noc_io_egress_22_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_c_bits_payload (_noc_io_egress_22_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_d_ready (_noc_io_ingress_30_flit_ready), // @[Protocol.scala:116:19]
.io_flits_d_valid (_nif_slave_1_io_flits_d_valid),
.io_flits_d_bits_head (_nif_slave_1_io_flits_d_bits_head),
.io_flits_d_bits_tail (_nif_slave_1_io_flits_d_bits_tail),
.io_flits_d_bits_payload (_nif_slave_1_io_flits_d_bits_payload),
.io_flits_d_bits_egress_id (_nif_slave_1_io_flits_d_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_23_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_23_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_23_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_23_flit_bits_payload) // @[Protocol.scala:116:19]
); // @[Tilelink.scala:303:31]
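  // nif_slave_2 (port 2): A/C/E <- egress 24/25/26, B/D -> ingress 31/32.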
TLSlaveToNoC_2 nif_slave_2 ( // @[Tilelink.scala:303:31]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_out_2_a_ready),
.io_tilelink_a_valid (io_protocol_0_out_2_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_out_2_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_out_2_a_bits_param),
.io_tilelink_a_bits_size (_nif_slave_2_io_tilelink_a_bits_size),
.io_tilelink_a_bits_source (io_protocol_0_out_2_a_bits_source),
.io_tilelink_a_bits_address (io_protocol_0_out_2_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_out_2_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_out_2_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_out_2_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_out_2_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_2_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_2_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_2_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_2_b_bits_address),
.io_tilelink_c_ready (io_protocol_0_out_2_c_ready),
.io_tilelink_c_valid (io_protocol_0_out_2_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_out_2_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_out_2_c_bits_param),
.io_tilelink_c_bits_size (_nif_slave_2_io_tilelink_c_bits_size),
.io_tilelink_c_bits_source (io_protocol_0_out_2_c_bits_source),
.io_tilelink_c_bits_address (io_protocol_0_out_2_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_out_2_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_out_2_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_out_2_d_ready),
.io_tilelink_d_valid (io_protocol_0_out_2_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_out_2_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_out_2_d_bits_param),
.io_tilelink_d_bits_size ({1'h0, io_protocol_0_out_2_d_bits_size}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_source (io_protocol_0_out_2_d_bits_source),
.io_tilelink_d_bits_sink ({2'h0, io_protocol_0_out_2_d_bits_sink}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_denied (io_protocol_0_out_2_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_out_2_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_out_2_d_bits_corrupt),
.io_tilelink_e_valid (io_protocol_0_out_2_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_2_io_tilelink_e_bits_sink),
.io_flits_a_ready (_nif_slave_2_io_flits_a_ready),
.io_flits_a_valid (_noc_io_egress_24_flit_valid), // @[Protocol.scala:116:19]
.io_flits_a_bits_head (_noc_io_egress_24_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_a_bits_tail (_noc_io_egress_24_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_a_bits_payload (_noc_io_egress_24_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_b_ready (_noc_io_ingress_31_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_2_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_2_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_2_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_2_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_2_io_flits_b_bits_egress_id),
.io_flits_c_ready (_nif_slave_2_io_flits_c_ready),
.io_flits_c_valid (_noc_io_egress_25_flit_valid), // @[Protocol.scala:116:19]
.io_flits_c_bits_head (_noc_io_egress_25_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_c_bits_tail (_noc_io_egress_25_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_c_bits_payload (_noc_io_egress_25_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_d_ready (_noc_io_ingress_32_flit_ready), // @[Protocol.scala:116:19]
.io_flits_d_valid (_nif_slave_2_io_flits_d_valid),
.io_flits_d_bits_head (_nif_slave_2_io_flits_d_bits_head),
.io_flits_d_bits_tail (_nif_slave_2_io_flits_d_bits_tail),
.io_flits_d_bits_payload (_nif_slave_2_io_flits_d_bits_payload),
.io_flits_d_bits_egress_id (_nif_slave_2_io_flits_d_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_26_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_26_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_26_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_26_flit_bits_payload) // @[Protocol.scala:116:19]
); // @[Tilelink.scala:303:31]
TLSlaveToNoC_3 nif_slave_3 ( // @[Tilelink.scala:303:31]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_out_3_a_ready),
.io_tilelink_a_valid (io_protocol_0_out_3_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_out_3_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_out_3_a_bits_param),
.io_tilelink_a_bits_size (_nif_slave_3_io_tilelink_a_bits_size),
.io_tilelink_a_bits_source (io_protocol_0_out_3_a_bits_source),
.io_tilelink_a_bits_address (io_protocol_0_out_3_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_out_3_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_out_3_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_out_3_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_out_3_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_3_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_3_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_3_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_3_b_bits_address),
.io_tilelink_c_ready (io_protocol_0_out_3_c_ready),
.io_tilelink_c_valid (io_protocol_0_out_3_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_out_3_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_out_3_c_bits_param),
.io_tilelink_c_bits_size (_nif_slave_3_io_tilelink_c_bits_size),
.io_tilelink_c_bits_source (io_protocol_0_out_3_c_bits_source),
.io_tilelink_c_bits_address (io_protocol_0_out_3_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_out_3_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_out_3_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_out_3_d_ready),
.io_tilelink_d_valid (io_protocol_0_out_3_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_out_3_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_out_3_d_bits_param),
.io_tilelink_d_bits_size ({1'h0, io_protocol_0_out_3_d_bits_size}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_source (io_protocol_0_out_3_d_bits_source),
.io_tilelink_d_bits_sink ({2'h0, io_protocol_0_out_3_d_bits_sink}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_denied (io_protocol_0_out_3_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_out_3_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_out_3_d_bits_corrupt),
.io_tilelink_e_valid (io_protocol_0_out_3_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_3_io_tilelink_e_bits_sink),
.io_flits_a_ready (_nif_slave_3_io_flits_a_ready),
.io_flits_a_valid (_noc_io_egress_27_flit_valid), // @[Protocol.scala:116:19]
.io_flits_a_bits_head (_noc_io_egress_27_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_a_bits_tail (_noc_io_egress_27_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_a_bits_payload (_noc_io_egress_27_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_b_ready (_noc_io_ingress_33_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_3_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_3_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_3_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_3_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_3_io_flits_b_bits_egress_id),
.io_flits_c_ready (_nif_slave_3_io_flits_c_ready),
.io_flits_c_valid (_noc_io_egress_28_flit_valid), // @[Protocol.scala:116:19]
.io_flits_c_bits_head (_noc_io_egress_28_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_c_bits_tail (_noc_io_egress_28_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_c_bits_payload (_noc_io_egress_28_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_d_ready (_noc_io_ingress_34_flit_ready), // @[Protocol.scala:116:19]
.io_flits_d_valid (_nif_slave_3_io_flits_d_valid),
.io_flits_d_bits_head (_nif_slave_3_io_flits_d_bits_head),
.io_flits_d_bits_tail (_nif_slave_3_io_flits_d_bits_tail),
.io_flits_d_bits_payload (_nif_slave_3_io_flits_d_bits_payload),
.io_flits_d_bits_egress_id (_nif_slave_3_io_flits_d_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_29_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_29_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_29_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_29_flit_bits_payload) // @[Protocol.scala:116:19]
); // @[Tilelink.scala:303:31]
TLSlaveToNoC_4 nif_slave_4 ( // @[Tilelink.scala:303:31]
.clock (clock),
.reset (reset),
.io_tilelink_a_ready (io_protocol_0_out_4_a_ready),
.io_tilelink_a_valid (io_protocol_0_out_4_a_valid),
.io_tilelink_a_bits_opcode (io_protocol_0_out_4_a_bits_opcode),
.io_tilelink_a_bits_param (io_protocol_0_out_4_a_bits_param),
.io_tilelink_a_bits_size (_nif_slave_4_io_tilelink_a_bits_size),
.io_tilelink_a_bits_source (io_protocol_0_out_4_a_bits_source),
.io_tilelink_a_bits_address (io_protocol_0_out_4_a_bits_address),
.io_tilelink_a_bits_mask (io_protocol_0_out_4_a_bits_mask),
.io_tilelink_a_bits_data (io_protocol_0_out_4_a_bits_data),
.io_tilelink_a_bits_corrupt (io_protocol_0_out_4_a_bits_corrupt),
.io_tilelink_b_ready (io_protocol_0_out_4_b_ready),
.io_tilelink_b_valid (io_protocol_0_out_4_b_valid),
.io_tilelink_b_bits_param (io_protocol_0_out_4_b_bits_param),
.io_tilelink_b_bits_source (io_protocol_0_out_4_b_bits_source),
.io_tilelink_b_bits_address (io_protocol_0_out_4_b_bits_address),
.io_tilelink_c_ready (io_protocol_0_out_4_c_ready),
.io_tilelink_c_valid (io_protocol_0_out_4_c_valid),
.io_tilelink_c_bits_opcode (io_protocol_0_out_4_c_bits_opcode),
.io_tilelink_c_bits_param (io_protocol_0_out_4_c_bits_param),
.io_tilelink_c_bits_size (_nif_slave_4_io_tilelink_c_bits_size),
.io_tilelink_c_bits_source (io_protocol_0_out_4_c_bits_source),
.io_tilelink_c_bits_address (io_protocol_0_out_4_c_bits_address),
.io_tilelink_c_bits_data (io_protocol_0_out_4_c_bits_data),
.io_tilelink_c_bits_corrupt (io_protocol_0_out_4_c_bits_corrupt),
.io_tilelink_d_ready (io_protocol_0_out_4_d_ready),
.io_tilelink_d_valid (io_protocol_0_out_4_d_valid),
.io_tilelink_d_bits_opcode (io_protocol_0_out_4_d_bits_opcode),
.io_tilelink_d_bits_param (io_protocol_0_out_4_d_bits_param),
.io_tilelink_d_bits_size ({1'h0, io_protocol_0_out_4_d_bits_size}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_source (io_protocol_0_out_4_d_bits_source),
.io_tilelink_d_bits_sink ({2'h0, io_protocol_0_out_4_d_bits_sink}), // @[Tilelink.scala:238:32]
.io_tilelink_d_bits_denied (io_protocol_0_out_4_d_bits_denied),
.io_tilelink_d_bits_data (io_protocol_0_out_4_d_bits_data),
.io_tilelink_d_bits_corrupt (io_protocol_0_out_4_d_bits_corrupt),
.io_tilelink_e_valid (io_protocol_0_out_4_e_valid),
.io_tilelink_e_bits_sink (_nif_slave_4_io_tilelink_e_bits_sink),
.io_flits_a_ready (_nif_slave_4_io_flits_a_ready),
.io_flits_a_valid (_noc_io_egress_30_flit_valid), // @[Protocol.scala:116:19]
.io_flits_a_bits_head (_noc_io_egress_30_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_a_bits_tail (_noc_io_egress_30_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_a_bits_payload (_noc_io_egress_30_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_b_ready (_noc_io_ingress_35_flit_ready), // @[Protocol.scala:116:19]
.io_flits_b_valid (_nif_slave_4_io_flits_b_valid),
.io_flits_b_bits_head (_nif_slave_4_io_flits_b_bits_head),
.io_flits_b_bits_tail (_nif_slave_4_io_flits_b_bits_tail),
.io_flits_b_bits_payload (_nif_slave_4_io_flits_b_bits_payload),
.io_flits_b_bits_egress_id (_nif_slave_4_io_flits_b_bits_egress_id),
.io_flits_c_ready (_nif_slave_4_io_flits_c_ready),
.io_flits_c_valid (_noc_io_egress_31_flit_valid), // @[Protocol.scala:116:19]
.io_flits_c_bits_head (_noc_io_egress_31_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_c_bits_tail (_noc_io_egress_31_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_c_bits_payload (_noc_io_egress_31_flit_bits_payload), // @[Protocol.scala:116:19]
.io_flits_d_ready (_noc_io_ingress_36_flit_ready), // @[Protocol.scala:116:19]
.io_flits_d_valid (_nif_slave_4_io_flits_d_valid),
.io_flits_d_bits_head (_nif_slave_4_io_flits_d_bits_head),
.io_flits_d_bits_tail (_nif_slave_4_io_flits_d_bits_tail),
.io_flits_d_bits_payload (_nif_slave_4_io_flits_d_bits_payload),
.io_flits_d_bits_egress_id (_nif_slave_4_io_flits_d_bits_egress_id),
.io_flits_e_valid (_noc_io_egress_32_flit_valid), // @[Protocol.scala:116:19]
.io_flits_e_bits_head (_noc_io_egress_32_flit_bits_head), // @[Protocol.scala:116:19]
.io_flits_e_bits_tail (_noc_io_egress_32_flit_bits_tail), // @[Protocol.scala:116:19]
.io_flits_e_bits_payload (_noc_io_egress_32_flit_bits_payload) // @[Protocol.scala:116:19]
); // @[Tilelink.scala:303:31]
assign io_protocol_0_in_8_b_bits_source = _nif_master_8_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_8_d_bits_source = _nif_master_8_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_7_b_bits_source = _nif_master_7_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_7_d_bits_source = _nif_master_7_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_6_b_bits_source = _nif_master_6_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_6_d_bits_source = _nif_master_6_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_5_b_bits_source = _nif_master_5_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_5_d_bits_source = _nif_master_5_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_4_b_bits_source = _nif_master_4_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_4_d_bits_source = _nif_master_4_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_3_b_bits_source = _nif_master_3_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_3_d_bits_source = _nif_master_3_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_2_b_bits_source = _nif_master_2_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_2_d_bits_source = _nif_master_2_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_1_b_bits_source = _nif_master_1_io_tilelink_b_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_1_d_bits_source = _nif_master_1_io_tilelink_d_bits_source[1:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_in_0_d_bits_source = _nif_master_io_tilelink_d_bits_source[4:0]; // @[Tilelink.scala:238:32, :276:32]
assign io_protocol_0_out_4_a_bits_size = _nif_slave_4_io_tilelink_a_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_4_c_bits_size = _nif_slave_4_io_tilelink_c_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_4_e_bits_sink = _nif_slave_4_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_3_a_bits_size = _nif_slave_3_io_tilelink_a_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_3_c_bits_size = _nif_slave_3_io_tilelink_c_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_3_e_bits_sink = _nif_slave_3_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_2_a_bits_size = _nif_slave_2_io_tilelink_a_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_2_c_bits_size = _nif_slave_2_io_tilelink_c_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_2_e_bits_sink = _nif_slave_2_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_1_a_bits_size = _nif_slave_1_io_tilelink_a_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_1_c_bits_size = _nif_slave_1_io_tilelink_c_bits_size[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_1_e_bits_sink = _nif_slave_1_io_tilelink_e_bits_sink[2:0]; // @[Tilelink.scala:238:32, :303:31]
assign io_protocol_0_out_0_a_bits_address = _nif_slave_io_tilelink_a_bits_address[28:0]; // @[Tilelink.scala:238:32, :303:31]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundRawFNToRecFN_e8_s24_61( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_61 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule |
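As a point of reference, here is a minimal, illustrative Chisel sketch (it is not part of the sources or the generated Verilog above) showing how the RoundRawFNToRecFN wrapper could be instantiated to round a raw single-precision value into the 33-bit recoded format. The rounding mode and tininess settings are chosen to match the constants hard-wired in the generated module above (3'h0 round-to-nearest-even, 1'h1 after-rounding tininess detection).

import chisel3._
import hardfloat._
import hardfloat.consts._

// Illustrative sketch only; all names are taken from the hardfloat sources shown above.
class RoundSingleSketch extends RawModule {
  val io = IO(new Bundle {
    val in    = Input(new RawFloat(8, 26)) // expWidth = 8, sigWidth + 2 = 26
    val out   = Output(Bits(33.W))         // sign ## 9-bit recoded exponent ## 23-bit fraction
    val flags = Output(Bits(5.W))          // invalid ## infinite ## overflow ## underflow ## inexact
  })
  val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even        // 3'h0, as in the generated module above
  rounder.io.detectTininess := tininess_afterRounding // 1'h1, as in the generated module above
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}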
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
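// Illustrative examples (not part of the original file): two legal channel descriptors.
// Ingress channels are encoded with src == -1 and egress channels with dst == -1, as
// enforced by the require statements above.
object ChannelRoutingInfoExamples {
  val intoNode3   = ChannelRoutingInfo(src = -1, dst = 3, vc = 0, n_vc = 1) // isIngress == true
  val vc2From0To1 = ChannelRoutingInfo(src = 0,  dst = 1, vc = 2, n_vc = 4) // ordinary link with 4 VCs
}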
/** Represents the properties of a packet that are relevant for routing.
 * ingressId and egressId uniquely identify a flow, but vNetId and the destination node
 * are also carried here to simplify the implementation of routing relations.
 *
 * @param ingressId packet's source ingress point
 * @param egressId packet's destination egress point
 * @param vNetId virtual subnetwork identifier
 * @param egressNode packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
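// Worked packing example (illustrative, not part of the original file): asLiteral packs
// the flow fields MSB-first in the order vNetId, ingressNode, ingressNodeId, egressNode,
// egressNodeId, shifting by each bundle field's width. Assuming 2-bit fields and the
// values (1, 3, 0, 2, 1), the same fold over plain integers yields:
object FlowRoutingInfoPackingExample {
  val fields = Seq((1, 2), (3, 2), (0, 2), (2, 2), (1, 2)) // (value, assumed field width)
  val packed = fields.foldLeft(BigInt(0)) { case (l, (v, w)) => (l << w) | BigInt(v) }
  // packed == 457 == 0b01_11_00_10_01
}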
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_4( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 2'h1; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 2'h2; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File Periphery.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug
import chisel3._
import chisel3.experimental.{noPrefix, IntParam}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.apb.{APBBundle, APBBundleParameters, APBMasterNode, APBMasterParameters, APBMasterPortParameters}
import freechips.rocketchip.interrupts.{IntSyncXbar, NullIntSyncSource}
import freechips.rocketchip.jtag.JTAGIO
import freechips.rocketchip.prci.{ClockSinkNode, ClockSinkParameters}
import freechips.rocketchip.subsystem.{BaseSubsystem, CBUS, FBUS, ResetSynchronous, SubsystemResetSchemeKey, TLBusWrapperLocation}
import freechips.rocketchip.tilelink.{TLFragmenter, TLWidthWidget}
import freechips.rocketchip.util.{AsyncResetSynchronizerShiftReg, CanHavePSDTestModeIO, ClockGate, PSDTestMode, PlusArg, ResetSynchronizerShiftReg}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
/** Protocols used for communicating with external debugging tools */
sealed trait DebugExportProtocol
case object DMI extends DebugExportProtocol
case object JTAG extends DebugExportProtocol
case object CJTAG extends DebugExportProtocol
case object APB extends DebugExportProtocol
/** Options for possible debug interfaces */
case class DebugAttachParams(
protocols: Set[DebugExportProtocol] = Set(DMI),
externalDisable: Boolean = false,
masterWhere: TLBusWrapperLocation = FBUS,
slaveWhere: TLBusWrapperLocation = CBUS
) {
def dmi = protocols.contains(DMI)
def jtag = protocols.contains(JTAG)
def cjtag = protocols.contains(CJTAG)
def apb = protocols.contains(APB)
}
case object ExportDebug extends Field(DebugAttachParams())
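// A hypothetical config fragment (sketch, not part of the original file): attach the
// debug module over JTAG instead of the default DMI by overriding ExportDebug.
class WithJTAGDebugAttach extends Config((site, here, up) => {
  case ExportDebug => up(ExportDebug).copy(protocols = Set(JTAG))
})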
class ClockedAPBBundle(params: APBBundleParameters) extends APBBundle(params) {
val clock = Clock()
val reset = Reset()
}
class DebugIO(implicit val p: Parameters) extends Bundle {
val clock = Input(Clock())
val reset = Input(Reset())
val clockeddmi = p(ExportDebug).dmi.option(Flipped(new ClockedDMIIO()))
val systemjtag = p(ExportDebug).jtag.option(new SystemJTAGIO)
val apb = p(ExportDebug).apb.option(Flipped(new ClockedAPBBundle(APBBundleParameters(addrBits=12, dataBits=32))))
//------------------------------
val ndreset = Output(Bool())
val dmactive = Output(Bool())
val dmactiveAck = Input(Bool())
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
val disableDebug = p(ExportDebug).externalDisable.option(Input(Bool()))
}
class PSDIO(implicit val p: Parameters) extends Bundle with CanHavePSDTestModeIO {
}
class ResetCtrlIO(val nComponents: Int)(implicit val p: Parameters) extends Bundle {
val hartResetReq = (p(DebugModuleKey).exists(x=>x.hasHartResets)).option(Output(Vec(nComponents, Bool())))
val hartIsInReset = Input(Vec(nComponents, Bool()))
}
/** Either adds a JTAG DTM to the system and exports a JTAG interface,
* or exports the Debug Module Interface (DMI), or exports and hooks up APB,
* based on a global parameter.
*/
trait HasPeripheryDebug { this: BaseSubsystem =>
private lazy val tlbus = locateTLBusWrapper(p(ExportDebug).slaveWhere)
lazy val debugCustomXbarOpt = p(DebugModuleKey).map(params => LazyModule( new DebugCustomXbar(outputRequiresInput = false)))
lazy val apbDebugNodeOpt = p(ExportDebug).apb.option(APBMasterNode(Seq(APBMasterPortParameters(Seq(APBMasterParameters("debugAPB"))))))
val debugTLDomainOpt = p(DebugModuleKey).map { _ =>
val domain = ClockSinkNode(Seq(ClockSinkParameters()))
domain := tlbus.fixedClockNode
domain
}
lazy val debugOpt = p(DebugModuleKey).map { params =>
val tlDM = LazyModule(new TLDebugModule(tlbus.beatBytes))
tlDM.node := tlbus.coupleTo("debug"){ TLFragmenter(tlbus.beatBytes, tlbus.blockBytes, nameSuffix = Some("Debug")) := _ }
tlDM.dmInner.dmInner.customNode := debugCustomXbarOpt.get.node
(apbDebugNodeOpt zip tlDM.apbNodeOpt) foreach { case (master, slave) =>
slave := master
}
tlDM.dmInner.dmInner.sb2tlOpt.foreach { sb2tl =>
locateTLBusWrapper(p(ExportDebug).masterWhere).coupleFrom("debug_sb") {
_ := TLWidthWidget(1) := sb2tl.node
}
}
tlDM
}
val debugNode = debugOpt.map(_.intnode)
val psd = InModuleBody {
val psd = IO(new PSDIO)
psd
}
val resetctrl = InModuleBody {
debugOpt.map { debug =>
debug.module.io.tl_reset := debugTLDomainOpt.get.in.head._1.reset
debug.module.io.tl_clock := debugTLDomainOpt.get.in.head._1.clock
val resetctrl = IO(new ResetCtrlIO(debug.dmOuter.dmOuter.intnode.edges.out.size))
debug.module.io.hartIsInReset := resetctrl.hartIsInReset
resetctrl.hartResetReq.foreach { rcio => debug.module.io.hartResetReq.foreach { rcdm => rcio := rcdm }}
resetctrl
}
}
// noPrefix is workaround https://github.com/freechipsproject/chisel3/issues/1603
val debug = InModuleBody { noPrefix(debugOpt.map { debugmod =>
val debug = IO(new DebugIO)
require(!(debug.clockeddmi.isDefined && debug.systemjtag.isDefined),
"You cannot have both DMI and JTAG interface in HasPeripheryDebug")
require(!(debug.clockeddmi.isDefined && debug.apb.isDefined),
"You cannot have both DMI and APB interface in HasPeripheryDebug")
require(!(debug.systemjtag.isDefined && debug.apb.isDefined),
"You cannot have both APB and JTAG interface in HasPeripheryDebug")
debug.clockeddmi.foreach { dbg => debugmod.module.io.dmi.get <> dbg }
(debug.apb
zip apbDebugNodeOpt
zip debugmod.module.io.apb_clock
zip debugmod.module.io.apb_reset).foreach {
case (((io, apb), c ), r) =>
apb.out(0)._1 <> io
c:= io.clock
r:= io.reset
}
debugmod.module.io.debug_reset := debug.reset
debugmod.module.io.debug_clock := debug.clock
debug.ndreset := debugmod.module.io.ctrl.ndreset
debug.dmactive := debugmod.module.io.ctrl.dmactive
debugmod.module.io.ctrl.dmactiveAck := debug.dmactiveAck
debug.extTrigger.foreach { x => debugmod.module.io.extTrigger.foreach {y => x <> y}}
// TODO in inheriting traits: Set this to something meaningful, e.g. "component is in reset or powered down"
debugmod.module.io.ctrl.debugUnavail.foreach { _ := false.B }
debug
})}
val dtm = InModuleBody { debug.flatMap(_.systemjtag.map(instantiateJtagDTM(_))) }
def instantiateJtagDTM(sj: SystemJTAGIO): DebugTransportModuleJTAG = {
val dtm = Module(new DebugTransportModuleJTAG(p(DebugModuleKey).get.nDMIAddrSize, p(JtagDTMKey)))
dtm.io.jtag <> sj.jtag
debug.map(_.disableDebug.foreach { x => dtm.io.jtag.TMS := sj.jtag.TMS | x }) // force TMS high when debug is disabled
dtm.io.jtag_clock := sj.jtag.TCK
dtm.io.jtag_reset := sj.reset
dtm.io.jtag_mfr_id := sj.mfr_id
dtm.io.jtag_part_number := sj.part_number
dtm.io.jtag_version := sj.version
dtm.rf_reset := sj.reset
debugOpt.map { outerdebug =>
outerdebug.module.io.dmi.get.dmi <> dtm.io.dmi
outerdebug.module.io.dmi.get.dmiClock := sj.jtag.TCK
outerdebug.module.io.dmi.get.dmiReset := sj.reset
}
dtm
}
}
/** BlackBox to export DMI interface */
class SimDTM(implicit p: Parameters) extends BlackBox with HasBlackBoxResource {
val io = IO(new Bundle {
val clk = Input(Clock())
val reset = Input(Bool())
val debug = new DMIIO
val exit = Output(UInt(32.W))
})
def connect(tbclk: Clock, tbreset: Bool, dutio: ClockedDMIIO, tbsuccess: Bool) = {
io.clk := tbclk
io.reset := tbreset
dutio.dmi <> io.debug
dutio.dmiClock := tbclk
dutio.dmiReset := tbreset
tbsuccess := io.exit === 1.U
assert(io.exit < 2.U, "*** FAILED *** (exit code = %d)\n", io.exit >> 1.U)
}
addResource("/vsrc/SimDTM.v")
addResource("/csrc/SimDTM.cc")
}
/** BlackBox to export JTAG interface */
class SimJTAG(tickDelay: Int = 50) extends BlackBox(Map("TICK_DELAY" -> IntParam(tickDelay)))
with HasBlackBoxResource {
val io = IO(new Bundle {
val clock = Input(Clock())
val reset = Input(Bool())
val jtag = new JTAGIO(hasTRSTn = true)
val enable = Input(Bool())
val init_done = Input(Bool())
val exit = Output(UInt(32.W))
})
def connect(dutio: JTAGIO, tbclock: Clock, tbreset: Bool, init_done: Bool, tbsuccess: Bool) = {
dutio.TCK := io.jtag.TCK
dutio.TMS := io.jtag.TMS
dutio.TDI := io.jtag.TDI
io.jtag.TDO := dutio.TDO
io.clock := tbclock
io.reset := tbreset
io.enable := PlusArg("jtag_rbb_enable", 0, "Enable SimJTAG for JTAG Connections. Simulation will pause until connection is made.")
io.init_done := init_done
// Success is determined by the gdbserver
// which is controlling this simulation.
tbsuccess := io.exit === 1.U
assert(io.exit < 2.U, "*** FAILED *** (exit code = %d)\n", io.exit >> 1.U)
}
addResource("/vsrc/SimJTAG.v")
addResource("/csrc/SimJTAG.cc")
addResource("/csrc/remote_bitbang.h")
addResource("/csrc/remote_bitbang.cc")
}
object Debug {
def connectDebug(
debugOpt: Option[DebugIO],
resetctrlOpt: Option[ResetCtrlIO],
psdio: PSDIO,
c: Clock,
r: Bool,
out: Bool,
tckHalfPeriod: Int = 2,
cmdDelay: Int = 2,
psd: PSDTestMode = 0.U.asTypeOf(new PSDTestMode()))
(implicit p: Parameters): Unit = {
connectDebugClockAndReset(debugOpt, c)
resetctrlOpt.map { rcio => rcio.hartIsInReset.map { _ := r }}
debugOpt.map { debug =>
debug.clockeddmi.foreach { d =>
val dtm = Module(new SimDTM).connect(c, r, d, out)
}
debug.systemjtag.foreach { sj =>
val jtag = Module(new SimJTAG(tickDelay=3)).connect(sj.jtag, c, r, ~r, out)
sj.reset := r.asAsyncReset
sj.mfr_id := p(JtagDTMKey).idcodeManufId.U(11.W)
sj.part_number := p(JtagDTMKey).idcodePartNum.U(16.W)
sj.version := p(JtagDTMKey).idcodeVersion.U(4.W)
}
debug.apb.foreach { apb =>
require(false, "No support for connectDebug for an APB debug connection.")
}
psdio.psd.foreach { _ <> psd }
debug.disableDebug.foreach { x => x := false.B }
}
}
def connectDebugClockAndReset(debugOpt: Option[DebugIO], c: Clock, sync: Boolean = true)(implicit p: Parameters): Unit = {
debugOpt.foreach { debug =>
val dmi_reset = debug.clockeddmi.map(_.dmiReset.asBool).getOrElse(false.B) |
debug.systemjtag.map(_.reset.asBool).getOrElse(false.B) |
debug.apb.map(_.reset.asBool).getOrElse(false.B)
connectDebugClockHelper(debug, dmi_reset, c, sync)
}
}
def connectDebugClockHelper(debug: DebugIO, dmi_reset: Reset, c: Clock, sync: Boolean = true)(implicit p: Parameters): Unit = {
val debug_reset = Wire(Bool())
withClockAndReset(c, dmi_reset) {
val debug_reset_syncd = if(sync) ~AsyncResetSynchronizerShiftReg(in=true.B, sync=3, name=Some("debug_reset_sync")) else dmi_reset
debug_reset := debug_reset_syncd
}
// Need to clock DM during debug_reset because of synchronous reset, so keep
// the clock alive for one cycle after debug_reset asserts to action this behavior.
// The unit should also be clocked when dmactive is high.
withClockAndReset(c, debug_reset.asAsyncReset) {
val dmactiveAck = if (sync) ResetSynchronizerShiftReg(in=debug.dmactive, sync=3, name=Some("dmactiveAck")) else debug.dmactive
val clock_en = RegNext(next=dmactiveAck, init=true.B)
val gated_clock =
if (!p(DebugModuleKey).get.clockGate) c
else ClockGate(c, clock_en, "debug_clock_gate")
debug.clock := gated_clock
debug.reset := (if (p(SubsystemResetSchemeKey)==ResetSynchronous) debug_reset else debug_reset.asAsyncReset)
debug.dmactiveAck := dmactiveAck
}
}
def tieoffDebug(debugOpt: Option[DebugIO], resetctrlOpt: Option[ResetCtrlIO] = None, psdio: Option[PSDIO] = None)(implicit p: Parameters): Bool = {
psdio.foreach(_.psd.foreach { _ <> 0.U.asTypeOf(new PSDTestMode()) } )
resetctrlOpt.map { rcio => rcio.hartIsInReset.map { _ := false.B }}
debugOpt.map { debug =>
debug.clock := true.B.asClock
debug.reset := (if (p(SubsystemResetSchemeKey)==ResetSynchronous) true.B else true.B.asAsyncReset)
debug.systemjtag.foreach { sj =>
sj.jtag.TCK := true.B.asClock
sj.jtag.TMS := true.B
sj.jtag.TDI := true.B
sj.jtag.TRSTn.foreach { r => r := true.B }
sj.reset := true.B.asAsyncReset
sj.mfr_id := 0.U
sj.part_number := 0.U
sj.version := 0.U
}
debug.clockeddmi.foreach { d =>
d.dmi.req.valid := false.B
d.dmi.req.bits.addr := 0.U
d.dmi.req.bits.data := 0.U
d.dmi.req.bits.op := 0.U
d.dmi.resp.ready := true.B
d.dmiClock := false.B.asClock
d.dmiReset := true.B.asAsyncReset
}
debug.apb.foreach { apb =>
apb.clock := false.B.asClock
apb.reset := true.B.asAsyncReset
apb.pready := false.B
apb.pslverr := false.B
apb.prdata := 0.U
apb.pduser := 0.U.asTypeOf(chiselTypeOf(apb.pduser))
apb.psel := false.B
apb.penable := false.B
}
debug.extTrigger.foreach { t =>
t.in.req := false.B
t.out.ack := t.out.req
}
debug.disableDebug.foreach { x => x := false.B }
debug.dmactiveAck := false.B
debug.ndreset
}.getOrElse(false.B)
}
}
File ResetCatchAndSync.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.{withClockAndReset, withReset}
/** Reset: asynchronous assert,
* synchronous de-assert
*
*/
class ResetCatchAndSync (sync: Int = 3) extends Module {
override def desiredName = s"ResetCatchAndSync_d${sync}"
val io = IO(new Bundle {
val sync_reset = Output(Bool())
val psd = Input(new PSDTestMode())
})
// Bypass both the resets to the flops themselves (to prevent DFT holes on
// those flops) and on the output of the synchronizer circuit (to control
// reset to any flops this circuit drives).
val post_psd_reset = Mux(io.psd.test_mode, io.psd.test_mode_reset, reset.asBool)
withReset(post_psd_reset) {
io.sync_reset := Mux(io.psd.test_mode, io.psd.test_mode_reset,
~AsyncResetSynchronizerShiftReg(true.B, sync))
}
}
object ResetCatchAndSync {
def apply(clk: Clock, rst: Bool, sync: Int = 3, name: Option[String] = None,
psd: Option[PSDTestMode] = None): Bool = {
withClockAndReset(clk, rst) {
val catcher = Module (new ResetCatchAndSync(sync))
if (name.isDefined) {catcher.suggestName(name.get)}
catcher.io.psd <> psd.getOrElse(WireDefault(0.U.asTypeOf(new PSDTestMode())))
catcher.io.sync_reset
}
}
def apply(clk: Clock, rst: Bool, sync: Int, name: String): Bool = apply(clk, rst, sync, Some(name))
def apply(clk: Clock, rst: Bool, name: String): Bool = apply(clk, rst, name = Some(name))
def apply(clk: Clock, rst: Bool, sync: Int, name: String, psd: PSDTestMode): Bool =
apply(clk, rst, sync, Some(name), Some(psd))
def apply(clk: Clock, rst: Bool, name: String, psd: PSDTestMode): Bool =
apply(clk, rst, name = Some(name), psd = Some(psd))
}
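// A minimal usage sketch (not part of the original file): catch an external asynchronous
// reset and produce a reset that de-asserts synchronously to `clk`.
class ResetCatchExample extends RawModule {
  val clk     = IO(Input(Clock()))
  val rst_in  = IO(Input(Bool()))
  val rst_out = IO(Output(Bool()))
  rst_out := ResetCatchAndSync(clk, rst_in, "external_reset_catch")
}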
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
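// A minimal usage sketch (not part of the original file): synchronize an asynchronous
// Bool into the local clock domain through a 3-deep, asynchronously reset chain.
class AsyncBoolSyncExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  io.out := AsyncResetShiftReg(io.in, depth = 3, init = 0, name = Some("bool_sync"))
}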
File IOCell.scala:
// See LICENSE for license details
package chipyard.iocell
import chisel3._
import chisel3.util.{Cat, HasBlackBoxInline}
import chisel3.reflect.DataMirror
import chisel3.experimental.{Analog, BaseModule}
// The following four IO cell bundle types are bare-minimum functional connections
// for modeling 4 different IO cell scenarios. The intention is that the user
// would create wrapper modules that extend these interfaces with additional
// control signals. These are loosely similar to the sifive-blocks PinCtrl bundles
// (https://github.com/sifive/sifive-blocks/blob/master/src/main/scala/devices/pinctrl/PinCtrl.scala),
// but we want to avoid a dependency on an external libraries.
/** The base IO bundle for an analog signal (typically something with no digital buffers inside)
* pad: off-chip (external) connection
* core: internal connection
*/
class AnalogIOCellBundle extends Bundle {
val pad = Analog(1.W) // Pad/bump signal (off-chip)
val core = Analog(1.W) // core signal (on-chip)
}
/** The base IO bundle for a signal with runtime-controllable direction
* pad: off-chip (external) connection
* i: input to chip logic (output from IO cell)
* ie: enable signal for i
* o: output from chip logic (input to IO cell)
* oe: enable signal for o
*/
class DigitalGPIOCellBundle extends Bundle {
val pad = Analog(1.W)
val i = Output(Bool())
val ie = Input(Bool())
val o = Input(Bool())
val oe = Input(Bool())
}
/** The base IO bundle for a digital output signal
* pad: off-chip (external) connection
* o: output from chip logic (input to IO cell)
* oe: enable signal for o
*/
class DigitalOutIOCellBundle extends Bundle {
val pad = Output(Bool())
val o = Input(Bool())
val oe = Input(Bool())
}
/** The base IO bundle for a digital input signal
* pad: off-chip (external) connection
* i: input to chip logic (output from IO cell)
* ie: enable signal for i
*/
class DigitalInIOCellBundle extends Bundle {
val pad = Input(Bool())
val i = Output(Bool())
val ie = Input(Bool())
}
trait IOCell extends BaseModule {
var iocell_name: Option[String] = None
/** Set IOCell name
* @param s Proposed name for the IOCell
*
 * @return This IOCell, with the proposed name applied
*/
def suggestName(s: String): this.type = {
iocell_name = Some(s)
super.suggestName(s)
}
}
trait AnalogIOCell extends IOCell {
val io: AnalogIOCellBundle
}
trait DigitalGPIOCell extends IOCell {
val io: DigitalGPIOCellBundle
}
trait DigitalInIOCell extends IOCell {
val io: DigitalInIOCellBundle
}
trait DigitalOutIOCell extends IOCell {
val io: DigitalOutIOCellBundle
}
// The following Generic IO cell black boxes have verilog models that mimic a very simple
// implementation of an IO cell. For building a real chip, it is important to implement
// and use similar classes which wrap the foundry-specific IO cells.
abstract class GenericIOCell extends BlackBox with HasBlackBoxInline {
val impl: String
val moduleName = this.getClass.getSimpleName
setInline(s"$moduleName.v", impl);
}
class GenericAnalogIOCell extends GenericIOCell with AnalogIOCell {
val io = IO(new AnalogIOCellBundle)
lazy val impl = s"""
`timescale 1ns/1ps
module GenericAnalogIOCell(
inout pad,
inout core
);
assign core = 1'bz;
assign pad = core;
endmodule"""
}
class GenericDigitalGPIOCell extends GenericIOCell with DigitalGPIOCell {
val io = IO(new DigitalGPIOCellBundle)
lazy val impl = s"""
`timescale 1ns/1ps
module GenericDigitalGPIOCell(
inout pad,
output i,
input ie,
input o,
input oe
);
assign pad = oe ? o : 1'bz;
assign i = ie ? pad : 1'b0;
endmodule"""
}
class GenericDigitalInIOCell extends GenericIOCell with DigitalInIOCell {
val io = IO(new DigitalInIOCellBundle)
lazy val impl = s"""
`timescale 1ns/1ps
module GenericDigitalInIOCell(
input pad,
output i,
input ie
);
assign i = ie ? pad : 1'b0;
endmodule"""
}
class GenericDigitalOutIOCell extends GenericIOCell with DigitalOutIOCell {
val io = IO(new DigitalOutIOCellBundle)
lazy val impl = s"""
`timescale 1ns/1ps
module GenericDigitalOutIOCell(
output pad,
input o,
input oe
);
assign pad = oe ? o : 1'bz;
endmodule"""
}
trait IOCellTypeParams {
def analog(): AnalogIOCell
def gpio(): DigitalGPIOCell
def input(): DigitalInIOCell
def output(): DigitalOutIOCell
}
case class GenericIOCellParams() extends IOCellTypeParams {
def analog() = Module(new GenericAnalogIOCell)
def gpio() = Module(new GenericDigitalGPIOCell)
def input() = Module(new GenericDigitalInIOCell)
def output() = Module(new GenericDigitalOutIOCell)
}
object IOCell {
/** From within a RawModule or MultiIOModule context, generate new module IOs from a given
* signal and return the new IO and a Seq containing all generated IO cells.
* @param coreSignal The signal onto which to add IO cells
* @param name An optional name or name prefix to use for naming IO cells
* @param abstractResetAsAsync When set, will coerce abstract resets to
* AsyncReset, and otherwise to Bool (sync reset)
* @return A tuple of (the generated IO data node, a Seq of all generated IO cell instances)
*/
def generateIOFromSignal[T <: Data](
coreSignal: T,
name: String,
typeParams: IOCellTypeParams = GenericIOCellParams(),
abstractResetAsAsync: Boolean = false
): (T, Seq[IOCell]) = {
val padSignal = IO(DataMirror.internal.chiselTypeClone[T](coreSignal)).suggestName(name)
val resetFn = if (abstractResetAsAsync) toAsyncReset else toSyncReset
val iocells = IOCell.generateFromSignal(coreSignal, padSignal, Some(s"iocell_$name"), typeParams, resetFn)
(padSignal, iocells)
}
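  // A minimal usage sketch (not part of the original file): punch a chip-level pad for a
  // child module's Bool output; generateIOFromSignal creates the pad IO and a
  // GenericDigitalOutIOCell in between. The class and signal names here are invented.
  class LedCore extends RawModule {
    val led = IO(Output(Bool()))
    led := true.B
  }
  class LedPadFrame extends RawModule {
    val core = Module(new LedCore)
    val (ledPad, ledIOCells) = IOCell.generateIOFromSignal(core.led, "led")
  }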
/** Connect two identical signals together by adding IO cells between them and return a Seq
* containing all generated IO cells.
* @param coreSignal The core-side (internal) signal onto which to connect/add IO cells
* @param padSignal The pad-side (external) signal onto which to connect IO cells
* @param name An optional name or name prefix to use for naming IO cells
* @return A Seq of all generated IO cell instances
*/
val toSyncReset: (Reset) => Bool = _.asBool
val toAsyncReset: (Reset) => AsyncReset = _.asAsyncReset
def generateFromSignal[T <: Data, R <: Reset](
coreSignal: T,
padSignal: T,
name: Option[String] = None,
typeParams: IOCellTypeParams = GenericIOCellParams(),
concretizeResetFn: (Reset) => R = toSyncReset
): Seq[IOCell] = {
def genCell[T <: Data](
castToBool: (T) => Bool,
castFromBool: (Bool) => T
)(coreSignal: T,
padSignal: T
): Seq[IOCell] = {
DataMirror.directionOf(coreSignal) match {
case ActualDirection.Input => {
val iocell = typeParams.input()
name.foreach(n => {
iocell.suggestName(n)
})
coreSignal := castFromBool(iocell.io.i)
iocell.io.ie := true.B
iocell.io.pad := castToBool(padSignal)
Seq(iocell)
}
case ActualDirection.Output => {
val iocell = typeParams.output()
name.foreach(n => {
iocell.suggestName(n)
})
iocell.io.o := castToBool(coreSignal)
iocell.io.oe := true.B
padSignal := castFromBool(iocell.io.pad)
Seq(iocell)
}
case _ => throw new Exception(s"Signal does not have a direction and cannot be matched to an IOCell")
}
}
def genCellForClock = genCell[Clock](_.asUInt.asBool, _.asClock) _
def genCellForAsyncReset = genCell[AsyncReset](_.asBool, _.asAsyncReset) _
def genCellForAbstractReset = genCell[Reset](_.asBool, concretizeResetFn) _
(coreSignal, padSignal) match {
case (coreSignal: Analog, padSignal: Analog) => {
if (coreSignal.getWidth == 0) {
Seq()
} else {
require(
coreSignal.getWidth == 1,
"Analogs wider than 1 bit are not supported because we can't bit-select Analogs (https://github.com/freechipsproject/chisel3/issues/536)"
)
val iocell = typeParams.analog()
name.foreach(n => iocell.suggestName(n))
iocell.io.core <> coreSignal
padSignal <> iocell.io.pad
Seq(iocell)
}
}
case (coreSignal: Clock, padSignal: Clock) => genCellForClock(coreSignal, padSignal)
case (coreSignal: AsyncReset, padSignal: AsyncReset) => genCellForAsyncReset(coreSignal, padSignal)
case (coreSignal: Bits, padSignal: Bits) => {
require(padSignal.getWidth == coreSignal.getWidth, "padSignal and coreSignal must be the same width")
if (padSignal.getWidth == 0) {
// This dummy assignment will prevent invalid firrtl from being emitted
DataMirror.directionOf(coreSignal) match {
case ActualDirection.Input => coreSignal := 0.U
case _ => {}
}
Seq()
} else {
DataMirror.directionOf(coreSignal) match {
case ActualDirection.Input => {
val iocells = padSignal.asBools.zipWithIndex.map { case (sig, i) =>
val iocell = typeParams.input()
// Note that we are relying on chisel deterministically naming this in the index order (which it does)
// This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals
// An alternative solution would be to suggestName(n + "_" + i)
name.foreach(n => {
iocell.suggestName(n)
})
iocell.io.pad := sig
iocell.io.ie := true.B
iocell
}
// Note that the reverse here is because Cat(Seq(a,b,c,d)) yields abcd, but a is index 0 of the Seq
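          // (worked example: for a 4-bit signal, iocells(0) handles bit 0 because padSignal.asBools
          //  is LSB-first, so Cat(iocells.map(_.io.i).reverse) puts iocells(0).io.i back at bit 0)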
coreSignal := Cat(iocells.map(_.io.i).reverse)
iocells
}
case ActualDirection.Output => {
val iocells = coreSignal.asBools.zipWithIndex.map { case (sig, i) =>
val iocell = typeParams.output()
// Note that we are relying on chisel deterministically naming this in the index order (which it does)
// This has the side-effect of naming index 0 with no _0 suffix, which is how chisel names other signals
// An alternative solution would be to suggestName(n + "_" + i)
name.foreach(n => {
iocell.suggestName(n)
})
iocell.io.o := sig
iocell.io.oe := true.B
iocell
}
// Note that the reverse here is because Cat(Seq(a,b,c,d)) yields abcd, but a is index 0 of the Seq
padSignal := Cat(iocells.map(_.io.pad).reverse)
iocells
}
case _ => throw new Exception("Bits signal does not have a direction and cannot be matched to IOCell(s)")
}
}
}
case (coreSignal: Reset, padSignal: Reset) => genCellForAbstractReset(coreSignal, padSignal)
case (coreSignal: Vec[_], padSignal: Vec[_]) => {
require(padSignal.size == coreSignal.size, "size of Vec for padSignal and coreSignal must be the same")
coreSignal.zip(padSignal).zipWithIndex.foldLeft(Seq.empty[IOCell]) { case (total, ((core, pad), i)) =>
val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + i), typeParams)
total ++ ios
}
}
case (coreSignal: Record, padSignal: Record) => {
coreSignal.elements.foldLeft(Seq.empty[IOCell]) { case (total, (eltName, core)) =>
val pad = padSignal.elements(eltName)
val ios = IOCell.generateFromSignal(core, pad, name.map(_ + "_" + eltName), typeParams)
total ++ ios
}
}
case _ => { throw new Exception("Oops, I don't know how to handle this signal.") }
}
}
}
File ChipTop.scala:
package chipyard
import chisel3._
import scala.collection.mutable.{ArrayBuffer}
import freechips.rocketchip.prci.{ClockGroupIdentityNode, ClockSinkParameters, ClockSinkNode, ClockGroup}
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, LazyRawModuleImp, LazyModuleImpLike, BindingScope}
import freechips.rocketchip.util.{DontTouch}
import chipyard.iobinders._
import chipyard.iocell._
case object BuildSystem extends Field[Parameters => LazyModule]((p: Parameters) => new DigitalTop()(p))
/**
* The base class used for building chips. This constructor instantiates a module specified by the BuildSystem parameter,
* named "system", which is an instance of DigitalTop by default. The diplomatic clocks of System, as well as its implicit clock,
 * are aggregated into the clockGroupNode. The parameterized functions controlled by ClockingSchemeKey and GlobalResetSchemeKey
* drive clock and reset generation
*/
class ChipTop(implicit p: Parameters) extends LazyModule with BindingScope
with HasIOBinders {
// The system module specified by BuildSystem
lazy val lazySystem = LazyModule(p(BuildSystem)(p)).suggestName("system")
// NOTE: Making this a LazyRawModule is moderately dangerous, as anonymous children
// of ChipTop (ex: ClockGroup) do not receive clock or reset.
  // However, anonymous children of ChipTop should not need an implicit Clock or Reset
// anyways, they probably need to be explicitly clocked.
lazy val module: LazyModuleImpLike = new LazyRawModuleImp(this) with DontTouch { }
}
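// A minimal sketch (not part of the upstream file) of overriding BuildSystem, as described in
// the ChipTop documentation above. This fragment simply re-selects DigitalTop; a project would
// substitute its own LazyModule constructor here. Config is written with its full path because
// this file only imports Parameters and Field from the config package.
class WithDefaultBuildSystem extends org.chipsalliance.cde.config.Config((site, here, up) => {
  case BuildSystem => (p: Parameters) => new DigitalTop()(p)
})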
File ClockGate.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{HasBlackBoxResource, HasBlackBoxPath}
import org.chipsalliance.cde.config.{Field, Parameters}
import java.nio.file.{Files, Paths}
case object ClockGateImpl extends Field[() => ClockGate](() => new EICG_wrapper)
case object ClockGateModelFile extends Field[Option[String]](None)
abstract class ClockGate extends BlackBox
with HasBlackBoxResource with HasBlackBoxPath {
val io = IO(new Bundle{
val in = Input(Clock())
val test_en = Input(Bool())
val en = Input(Bool())
val out = Output(Clock())
})
def addVerilogResource(vsrc: String): Unit = {
if (Files.exists(Paths.get(vsrc)))
addPath(vsrc)
else
addResource(vsrc)
}
}
object ClockGate {
def apply[T <: ClockGate](
in: Clock,
en: Bool,
name: Option[String] = None)(implicit p: Parameters): Clock = {
val cg = Module(p(ClockGateImpl)())
name.foreach(cg.suggestName(_))
p(ClockGateModelFile).map(cg.addVerilogResource(_))
cg.io.in := in
cg.io.test_en := false.B
cg.io.en := en
cg.io.out
}
def apply[T <: ClockGate](
in: Clock,
en: Bool,
name: String)(implicit p: Parameters): Clock =
apply(in, en, Some(name))
}
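// Usage sketch (hypothetical call site): gate a domain clock with an enable bit. An implicit
// Parameters must be in scope so ClockGateImpl and ClockGateModelFile can be looked up:
//   val gatedClock = ClockGate(clockIn, enable, "my_clock_gate")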
// behavioral model of Integrated Clock Gating cell
class EICG_wrapper extends ClockGate
File IOBinders.scala:
package chipyard.iobinders
import chisel3._
import chisel3.reflect.DataMirror
import chisel3.experimental.Analog
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import org.chipsalliance.diplomacy.aop._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.bundlebridge._
import freechips.rocketchip.diplomacy.{Resource, ResourceBinding, ResourceAddress, RegionType}
import freechips.rocketchip.devices.debug._
import freechips.rocketchip.jtag.{JTAGIO}
import freechips.rocketchip.subsystem._
import freechips.rocketchip.system.{SimAXIMem}
import freechips.rocketchip.amba.axi4.{AXI4Bundle, AXI4SlaveNode, AXI4MasterNode, AXI4EdgeParameters}
import freechips.rocketchip.util._
import freechips.rocketchip.prci._
import freechips.rocketchip.groundtest.{GroundTestSubsystemModuleImp, GroundTestSubsystem}
import freechips.rocketchip.tilelink.{TLBundle}
import sifive.blocks.devices.gpio._
import sifive.blocks.devices.uart._
import sifive.blocks.devices.spi._
import sifive.blocks.devices.i2c._
import tracegen.{TraceGenSystemModuleImp}
import chipyard.iocell._
import testchipip.serdes.{CanHavePeripheryTLSerial, SerialTLKey}
import testchipip.spi.{SPIChipIO}
import testchipip.boot.{CanHavePeripheryCustomBootPin}
import testchipip.soc.{CanHavePeripheryChipIdPin}
import testchipip.util.{ClockedIO}
import testchipip.iceblk.{CanHavePeripheryBlockDevice, BlockDeviceKey, BlockDeviceIO}
import testchipip.cosim.{CanHaveTraceIO, TraceOutputTop, SpikeCosimConfig}
import testchipip.tsi.{CanHavePeripheryUARTTSI, UARTTSIIO}
import icenet.{CanHavePeripheryIceNIC, SimNetwork, NicLoopback, NICKey, NICIOvonly}
import chipyard.{CanHaveMasterTLMemPort, ChipyardSystem, ChipyardSystemModule}
import chipyard.example.{CanHavePeripheryGCD}
import scala.reflect.{ClassTag}
object IOBinderTypes {
type IOBinderTuple = (Seq[Port[_]], Seq[IOCell])
type IOBinderFunction = (Boolean, => Any) => ModuleValue[IOBinderTuple]
}
import IOBinderTypes._
// System for instantiating binders based
// on the scala type of the Target (_not_ its IO). This avoids needing to
// duplicate harnesses (essentially test harnesses) for each target.
// IOBinders is a map from string representations of traits to the desired
// IO connection behavior for tops matching that trait. We use strings to enable
// composition and overriding of IOBinders, much like how normal Keys in the config
// system are used. At elaboration, the testharness traverses this set of functions,
// and functions which match the type of the DigitalTop are evaluated.
// You can add your own binder by adding a new (key, fn) pair, typically by using
// the OverrideIOBinder or ComposeIOBinder helper classes defined below
case object IOBinders extends Field[Map[String, Seq[IOBinderFunction]]](
Map[String, Seq[IOBinderFunction]]().withDefaultValue(Nil)
)
abstract trait HasIOBinders extends HasChipyardPorts { this: LazyModule =>
val lazySystem: LazyModule
private val iobinders = p(IOBinders)
// Note: IOBinders cannot rely on the implicit clock/reset, as they may be called from the
// context of a LazyRawModuleImp
private val lzy = iobinders.map({ case (s,fns) => s -> fns.map(f => f(true, lazySystem)) })
private val imp = iobinders.map({ case (s,fns) => s -> fns.map(f => f(false, lazySystem.module)) })
private lazy val lzyFlattened: Map[String, IOBinderTuple] = lzy.map({
case (s,ms) => s -> (ms.map(_._1).flatten, ms.map(_._2).flatten)
})
private lazy val impFlattened: Map[String, IOBinderTuple] = imp.map({
case (s,ms) => s -> (ms.map(_._1).flatten, ms.map(_._2).flatten)
})
// A publicly accessible list of IO cells (useful for a floorplanning tool, for example)
val iocells = InModuleBody { (lzyFlattened.values ++ impFlattened.values).unzip._2.flatten.toBuffer }
// A mapping between stringified DigitalSystem traits and their corresponding ChipTop ports
val portMap = InModuleBody { iobinders.keys.map(k => k -> (lzyFlattened(k)._1 ++ impFlattened(k)._1)).toMap }
// A mapping between stringified DigitalSystem traits and their corresponding ChipTop iocells
val iocellMap = InModuleBody { iobinders.keys.map(k => k -> (lzyFlattened(k)._2 ++ impFlattened(k)._2)).toMap }
def ports = portMap.getWrappedValue.values.flatten.toSeq
InModuleBody {
println("IOCells generated by IOBinders:")
for ((k, v) <- iocellMap) {
if (!v.isEmpty) {
val cells = v.map(_.getClass.getSimpleName).groupBy(identity).mapValues(_.size)
println(s" IOBinder for $k generated:")
for ((t, c) <- cells) { println(s" $c X $t") }
}
}
println()
val totals = iocells.map(_.getClass.getSimpleName).groupBy(identity).mapValues(_.size)
println(s" Total generated ${iocells.size} IOCells:")
for ((t, c) <- totals) { println(s" $c X $t") }
}
}
// Note: The parameters instance is accessible only through LazyModule
// or LazyModuleImpLike. The self-type requirement in traits like
// CanHaveMasterAXI4MemPort is insufficient to make it accessible to the IOBinder.
// As a result, IOBinders only work on Modules which inherit LazyModule or
// LazyModuleImpLike
object GetSystemParameters {
def apply(s: Any): Parameters = {
s match {
case s: LazyModule => s.p
case s: LazyModuleImpLike => s.p
case _ => throw new Exception(s"Trying to get Parameters from a system that is not LazyModule or LazyModuleImpLike")
}
}
}
class IOBinder[T](composer: Seq[IOBinderFunction] => Seq[IOBinderFunction])(implicit tag: ClassTag[T]) extends Config((site, here, up) => {
case IOBinders => {
val upMap = up(IOBinders)
upMap + (tag.runtimeClass.toString -> composer(upMap(tag.runtimeClass.toString)))
}
})
class ConcreteIOBinder[T](composes: Boolean, fn: T => IOBinderTuple)(implicit tag: ClassTag[T]) extends IOBinder[T](
up => (if (composes) up else Nil) ++ Seq(((_, t) => { InModuleBody {
t match {
case system: T => fn(system)
case _ => (Nil, Nil)
}
}}): IOBinderFunction)
)
class LazyIOBinder[T](composes: Boolean, fn: T => ModuleValue[IOBinderTuple])(implicit tag: ClassTag[T]) extends IOBinder[T](
up => (if (composes) up else Nil) ++ Seq(((isLazy, t) => {
val empty = new ModuleValue[IOBinderTuple] {
def getWrappedValue: IOBinderTuple = (Nil, Nil)
}
if (isLazy) {
t match {
case system: T => fn(system)
case _ => empty
}
} else {
empty
}
}): IOBinderFunction)
)
// The "Override" binders override any previous IOBinders (lazy or concrete) defined on the same trait.
// The "Compose" binders do not override previously defined IOBinders on the same trait
// The default IOBinders evaluate only in the concrete "ModuleImp" phase of elaboration
// The "Lazy" IOBinders evaluate in the LazyModule phase, but can also generate hardware through InModuleBody
class OverrideIOBinder[T](fn: T => IOBinderTuple)(implicit tag: ClassTag[T]) extends ConcreteIOBinder[T](false, fn)
class ComposeIOBinder[T](fn: T => IOBinderTuple)(implicit tag: ClassTag[T]) extends ConcreteIOBinder[T](true, fn)
class OverrideLazyIOBinder[T](fn: T => ModuleValue[IOBinderTuple])(implicit tag: ClassTag[T]) extends LazyIOBinder[T](false, fn)
class ComposeLazyIOBinder[T](fn: T => ModuleValue[IOBinderTuple])(implicit tag: ClassTag[T]) extends LazyIOBinder[T](true, fn)
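// A hypothetical composition sketch showing how the binder classes above are used: each one is
// a Config fragment, so a chip configuration mixes the desired binders in ahead of a base config
// (here chipyard.config.AbstractConfig, which already provides a default binder set). Fragments
// listed earlier take precedence for the same trait. "ExampleIOBinderConfig" is a placeholder name.
class ExampleIOBinderConfig extends Config(
  new WithUARTIOCells ++   // replaces the base config's binder for HasPeripheryUART
  new WithGPIOCells ++     // replaces the base config's binder for HasPeripheryGPIO
  new chipyard.config.AbstractConfig
)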
case object IOCellKey extends Field[IOCellTypeParams](GenericIOCellParams())
class WithGPIOCells extends OverrideIOBinder({
(system: HasPeripheryGPIO) => {
val (ports2d, cells2d) = system.gpio.zipWithIndex.map({ case (gpio, i) =>
gpio.pins.zipWithIndex.map({ case (pin, j) =>
val p = system.asInstanceOf[BaseSubsystem].p
val g = IO(Analog(1.W)).suggestName(s"gpio_${i}_${j}")
val iocell = p(IOCellKey).gpio().suggestName(s"iocell_gpio_${i}_${j}")
iocell.io.o := pin.o.oval
iocell.io.oe := pin.o.oe
iocell.io.ie := pin.o.ie
pin.i.ival := iocell.io.i
pin.i.po.foreach(_ := DontCare)
iocell.io.pad <> g
(GPIOPort(() => g, i, j), iocell)
}).unzip
}).unzip
(ports2d.flatten, cells2d.flatten)
}
})
class WithGPIOPunchthrough extends OverrideIOBinder({
(system: HasPeripheryGPIO) => {
val ports = system.gpio.zipWithIndex.map { case (gpio, i) =>
val io_gpio = IO(gpio.cloneType).suggestName(s"gpio_$i")
io_gpio <> gpio
GPIOPinsPort(() => io_gpio, i)
}
(ports, Nil)
}
})
class WithI2CPunchthrough extends OverrideIOBinder({
(system: HasPeripheryI2C) => {
val ports = system.i2c.zipWithIndex.map { case (i2c, i) =>
val io_i2c = IO(i2c.cloneType).suggestName(s"i2c_$i")
io_i2c <> i2c
I2CPort(() => i2c)
}
(ports, Nil)
}
})
// DOC include start: WithUARTIOCells
class WithUARTIOCells extends OverrideIOBinder({
(system: HasPeripheryUART) => {
val (ports: Seq[UARTPort], cells2d) = system.uart.zipWithIndex.map({ case (u, i) =>
val p = system.asInstanceOf[BaseSubsystem].p
val (port, ios) = IOCell.generateIOFromSignal(u, s"uart_${i}", p(IOCellKey), abstractResetAsAsync = true)
val where = PBUS // TODO fix
val bus = system.asInstanceOf[HasTileLinkLocations].locateTLBusWrapper(where)
val freqMHz = bus.dtsFrequency.get / 1000000
(UARTPort(() => port, i, freqMHz.toInt), ios)
}).unzip
(ports, cells2d.flatten)
}
})
// DOC include end: WithUARTIOCells
class WithSPIIOPunchthrough extends OverrideLazyIOBinder({
(system: HasPeripherySPI) => {
// attach resource to 1st SPI
if (system.tlSpiNodes.size > 0) ResourceBinding {
Resource(new MMCDevice(system.tlSpiNodes.head.device, 1), "reg").bind(ResourceAddress(0))
}
InModuleBody {
val spi = system.spi
val ports = spi.zipWithIndex.map({ case (s, i) =>
val io_spi = IO(s.cloneType).suggestName(s"spi_$i")
io_spi <> s
SPIPort(() => io_spi)
})
(ports, Nil)
}
}
})
class WithSPIFlashIOCells extends OverrideIOBinder({
(system: HasPeripherySPIFlash) => {
val (ports: Seq[SPIFlashPort], cells2d) = system.qspi.zipWithIndex.map({ case (s, i) =>
val p = system.asInstanceOf[BaseSubsystem].p
val name = s"spi_${i}"
val port = IO(new SPIChipIO(s.c.csWidth)).suggestName(name)
val iocellBase = s"iocell_${name}"
// SCK and CS are unidirectional outputs
val sckIOs = IOCell.generateFromSignal(s.sck, port.sck, Some(s"${iocellBase}_sck"), p(IOCellKey), IOCell.toAsyncReset)
val csIOs = IOCell.generateFromSignal(s.cs, port.cs, Some(s"${iocellBase}_cs"), p(IOCellKey), IOCell.toAsyncReset)
      // DQ are bidirectional, so they need special treatment
val dqIOs = s.dq.zip(port.dq).zipWithIndex.map { case ((pin, ana), j) =>
val iocell = p(IOCellKey).gpio().suggestName(s"${iocellBase}_dq_${j}")
iocell.io.o := pin.o
iocell.io.oe := pin.oe
iocell.io.ie := true.B
pin.i := iocell.io.i
iocell.io.pad <> ana
iocell
}
(SPIFlashPort(() => port, p(PeripherySPIFlashKey)(i), i), dqIOs ++ csIOs ++ sckIOs)
}).unzip
(ports, cells2d.flatten)
}
})
class WithExtInterruptIOCells extends OverrideIOBinder({
(system: HasExtInterruptsModuleImp) => {
if (system.outer.nExtInterrupts > 0) {
val (port: UInt, cells) = IOCell.generateIOFromSignal(system.interrupts, "ext_interrupts", system.p(IOCellKey), abstractResetAsAsync = true)
(Seq(ExtIntPort(() => port)), cells)
} else {
system.interrupts := DontCare // why do I have to drive this 0-wide wire???
(Nil, Nil)
}
}
})
// Rocketchip's JTAGIO exposes the oe signal, which doesn't go off-chip
class JTAGChipIO extends Bundle {
val TCK = Input(Clock())
val TMS = Input(Bool())
val TDI = Input(Bool())
val TDO = Output(Bool())
}
// WARNING: Don't disable syncReset unless you are trying to
// get around bugs in RTL simulators
class WithDebugIOCells(syncReset: Boolean = true) extends OverrideLazyIOBinder({
(system: HasPeripheryDebug) => {
implicit val p = GetSystemParameters(system)
val tlbus = system.asInstanceOf[BaseSubsystem].locateTLBusWrapper(p(ExportDebug).slaveWhere)
val clockSinkNode = system.debugOpt.map(_ => ClockSinkNode(Seq(ClockSinkParameters())))
clockSinkNode.map(_ := tlbus.fixedClockNode)
def clockBundle = clockSinkNode.get.in.head._1
InModuleBody { system.asInstanceOf[BaseSubsystem] match { case system: HasPeripheryDebug => {
system.debug.map({ debug =>
// We never use the PSDIO, so tie it off on-chip
system.psd.psd.foreach { _ <> 0.U.asTypeOf(new PSDTestMode) }
system.resetctrl.map { rcio => rcio.hartIsInReset.map { _ := clockBundle.reset.asBool } }
system.debug.map { d =>
// Tie off extTrigger
d.extTrigger.foreach { t =>
t.in.req := false.B
t.out.ack := t.out.req
}
// Tie off disableDebug
d.disableDebug.foreach { d => d := false.B }
// Drive JTAG on-chip IOs
d.systemjtag.map { j =>
j.reset := (if (syncReset) ResetCatchAndSync(j.jtag.TCK, clockBundle.reset.asBool) else clockBundle.reset.asBool)
j.mfr_id := p(JtagDTMKey).idcodeManufId.U(11.W)
j.part_number := p(JtagDTMKey).idcodePartNum.U(16.W)
j.version := p(JtagDTMKey).idcodeVersion.U(4.W)
}
}
Debug.connectDebugClockAndReset(Some(debug), clockBundle.clock)
// Add IOCells for the DMI/JTAG/APB ports
val dmiTuple = debug.clockeddmi.map { d =>
val (port, cells) = IOCell.generateIOFromSignal(d, "dmi", p(IOCellKey), abstractResetAsAsync = true)
(DMIPort(() => port), cells)
}
val jtagTuple = debug.systemjtag.map { j =>
val jtag_wire = Wire(new JTAGChipIO)
j.jtag.TCK := jtag_wire.TCK
j.jtag.TMS := jtag_wire.TMS
j.jtag.TDI := jtag_wire.TDI
jtag_wire.TDO := j.jtag.TDO.data
val (port, cells) = IOCell.generateIOFromSignal(jtag_wire, "jtag", p(IOCellKey), abstractResetAsAsync = true)
(JTAGPort(() => port), cells)
}
require(!debug.apb.isDefined)
val allTuples = (dmiTuple ++ jtagTuple).toSeq
(allTuples.map(_._1).toSeq, allTuples.flatMap(_._2).toSeq)
}).getOrElse((Nil, Nil))
}}}
}
})
class WithSerialTLIOCells extends OverrideIOBinder({
(system: CanHavePeripheryTLSerial) => {
val (ports, cells) = system.serial_tls.zipWithIndex.map({ case (s, id) =>
val sys = system.asInstanceOf[BaseSubsystem]
val (port, cells) = IOCell.generateIOFromSignal(s.getWrappedValue, s"serial_tl_$id", sys.p(IOCellKey), abstractResetAsAsync = true)
(SerialTLPort(() => port, sys.p(SerialTLKey)(id), system.serdessers(id), id), cells)
}).unzip
(ports.toSeq, cells.flatten.toSeq)
}
})
class WithChipIdIOCells extends OverrideIOBinder({
(system: CanHavePeripheryChipIdPin) => system.chip_id_pin.map({ p =>
val sys = system.asInstanceOf[BaseSubsystem]
val (port, cells) = IOCell.generateIOFromSignal(p.getWrappedValue, s"chip_id", sys.p(IOCellKey), abstractResetAsAsync = true)
(Seq(ChipIdPort(() => port)), cells)
  }).getOrElse((Nil, Nil))
})
class WithSerialTLPunchthrough extends OverrideIOBinder({
(system: CanHavePeripheryTLSerial) => {
val (ports, cells) = system.serial_tls.zipWithIndex.map({ case (s, id) =>
val sys = system.asInstanceOf[BaseSubsystem]
val port = IO(chiselTypeOf(s.getWrappedValue))
port <> s.getWrappedValue
(SerialTLPort(() => port, sys.p(SerialTLKey)(id), system.serdessers(id), id), Nil)
}).unzip
(ports.toSeq, cells.flatten.toSeq)
}
})
class WithAXI4MemPunchthrough extends OverrideLazyIOBinder({
(system: CanHaveMasterAXI4MemPort) => {
implicit val p: Parameters = GetSystemParameters(system)
val clockSinkNode = p(ExtMem).map(_ => ClockSinkNode(Seq(ClockSinkParameters())))
clockSinkNode.map(_ := system.asInstanceOf[HasTileLinkLocations].locateTLBusWrapper(MBUS).fixedClockNode)
def clockBundle = clockSinkNode.get.in.head._1
InModuleBody {
val ports: Seq[AXI4MemPort] = system.mem_axi4.zipWithIndex.map({ case (m, i) =>
val port = IO(new ClockedIO(DataMirror.internal.chiselTypeClone[AXI4Bundle](m))).suggestName(s"axi4_mem_${i}")
port.bits <> m
port.clock := clockBundle.clock
AXI4MemPort(() => port, p(ExtMem).get, system.memAXI4Node.edges.in(i), p(MemoryBusKey).dtsFrequency.get.toInt)
}).toSeq
(ports, Nil)
}
}
})
class WithAXI4MMIOPunchthrough extends OverrideLazyIOBinder({
(system: CanHaveMasterAXI4MMIOPort) => {
implicit val p: Parameters = GetSystemParameters(system)
val clockSinkNode = p(ExtBus).map(_ => ClockSinkNode(Seq(ClockSinkParameters())))
clockSinkNode.map(_ := system.asInstanceOf[HasTileLinkLocations].locateTLBusWrapper(SBUS).fixedClockNode)
def clockBundle = clockSinkNode.get.in.head._1
InModuleBody {
val ports: Seq[AXI4MMIOPort] = system.mmio_axi4.zipWithIndex.map({ case (m, i) =>
val port = IO(new ClockedIO(DataMirror.internal.chiselTypeClone[AXI4Bundle](m))).suggestName(s"axi4_mmio_${i}")
port.bits <> m
port.clock := clockBundle.clock
AXI4MMIOPort(() => port, p(ExtBus).get, system.mmioAXI4Node.edges.in(i))
}).toSeq
(ports, Nil)
}
}
})
class WithL2FBusAXI4Punchthrough extends OverrideLazyIOBinder({
(system: CanHaveSlaveAXI4Port) => {
implicit val p: Parameters = GetSystemParameters(system)
val clockSinkNode = p(ExtIn).map(_ => ClockSinkNode(Seq(ClockSinkParameters())))
val fbus = system.asInstanceOf[HasTileLinkLocations].locateTLBusWrapper(FBUS)
clockSinkNode.map(_ := fbus.fixedClockNode)
def clockBundle = clockSinkNode.get.in.head._1
InModuleBody {
val ports: Seq[AXI4InPort] = system.l2_frontend_bus_axi4.zipWithIndex.map({ case (m, i) =>
val port = IO(new ClockedIO(Flipped(DataMirror.internal.chiselTypeClone[AXI4Bundle](m)))).suggestName(s"axi4_fbus_${i}")
m <> port.bits
port.clock := clockBundle.clock
AXI4InPort(() => port, p(ExtIn).get)
}).toSeq
(ports, Nil)
}
}
})
class WithBlockDeviceIOPunchthrough extends OverrideIOBinder({
(system: CanHavePeripheryBlockDevice) => {
val ports: Seq[BlockDevicePort] = system.bdev.map({ bdev =>
val p = GetSystemParameters(system)
val bdParams = p(BlockDeviceKey).get
val port = IO(new ClockedIO(new BlockDeviceIO(bdParams))).suggestName("blockdev")
port <> bdev
BlockDevicePort(() => port, bdParams)
}).toSeq
(ports, Nil)
}
})
class WithNICIOPunchthrough extends OverrideIOBinder({
(system: CanHavePeripheryIceNIC) => {
val ports: Seq[NICPort] = system.icenicOpt.map({ n =>
val p = GetSystemParameters(system)
val port = IO(new ClockedIO(new NICIOvonly)).suggestName("nic")
port <> n
NICPort(() => port, p(NICKey).get)
}).toSeq
(ports, Nil)
}
})
class WithTraceGenSuccessPunchthrough extends OverrideIOBinder({
(system: TraceGenSystemModuleImp) => {
val success: Bool = IO(Output(Bool())).suggestName("success")
success := system.success
(Seq(SuccessPort(() => success)), Nil)
}
})
class WithTraceIOPunchthrough extends OverrideLazyIOBinder({
(system: CanHaveTraceIO) => InModuleBody {
val ports: Option[TracePort] = system.traceIO.map { t =>
val trace = IO(DataMirror.internal.chiselTypeClone[TraceOutputTop](t)).suggestName("trace")
trace <> t
val p = GetSystemParameters(system)
val chipyardSystem = system.asInstanceOf[ChipyardSystem]
val tiles = chipyardSystem.totalTiles.values
val viewpointBus = system.asInstanceOf[HasConfigurableTLNetworkTopology].viewpointBus
val mems = viewpointBus.unifyManagers.filter { m =>
val regionTypes = Seq(RegionType.CACHED, RegionType.TRACKED, RegionType.UNCACHED, RegionType.IDEMPOTENT)
val ignoreAddresses = Seq(
0x10000 // bootrom is handled specially
)
regionTypes.contains(m.regionType) && !ignoreAddresses.contains(m.address.map(_.base).min)
}.map { m =>
val base = m.address.map(_.base).min
val size = m.address.map(_.max).max - base + 1
(base, size)
}
val useSimDTM = p(ExportDebug).protocols.contains(DMI) // assume that exposing clockeddmi means we will connect SimDTM
val cfg = SpikeCosimConfig(
isa = tiles.headOption.map(_.isaDTS).getOrElse(""),
priv = tiles.headOption.map(t => if (t.usingUser) "MSU" else if (t.usingSupervisor) "MS" else "M").getOrElse(""),
maxpglevels = tiles.headOption.map(_.tileParams.core.pgLevels).getOrElse(0),
pmpregions = tiles.headOption.map(_.tileParams.core.nPMPs).getOrElse(0),
nharts = tiles.size,
bootrom = chipyardSystem.bootROM.map(_.module.contents.toArray.mkString(" ")).getOrElse(""),
has_dtm = useSimDTM,
mems = mems,
// Connect using the legacy API for firesim only
mem0_base = p(ExtMem).map(_.master.base).getOrElse(BigInt(0)),
mem0_size = p(ExtMem).map(_.master.size).getOrElse(BigInt(0)),
)
TracePort(() => trace, cfg)
}
(ports.toSeq, Nil)
}
})
class WithCustomBootPin extends OverrideIOBinder({
(system: CanHavePeripheryCustomBootPin) => system.custom_boot_pin.map({ p =>
val sys = system.asInstanceOf[BaseSubsystem]
val (port, cells) = IOCell.generateIOFromSignal(p.getWrappedValue, "custom_boot", sys.p(IOCellKey), abstractResetAsAsync = true)
(Seq(CustomBootPort(() => port)), cells)
}).getOrElse((Nil, Nil))
})
class WithUARTTSIPunchthrough extends OverrideIOBinder({
(system: CanHavePeripheryUARTTSI) => system.uart_tsi.map({ p =>
val sys = system.asInstanceOf[BaseSubsystem]
val uart_tsi = IO(new UARTTSIIO(p.uartParams))
uart_tsi <> p
(Seq(UARTTSIPort(() => uart_tsi)), Nil)
}).getOrElse((Nil, Nil))
})
class WithTLMemPunchthrough extends OverrideIOBinder({
(system: CanHaveMasterTLMemPort) => {
val io_tl_mem_pins_temp = IO(DataMirror.internal.chiselTypeClone[HeterogeneousBag[TLBundle]](system.mem_tl)).suggestName("tl_slave")
io_tl_mem_pins_temp <> system.mem_tl
(Seq(TLMemPort(() => io_tl_mem_pins_temp)), Nil)
}
})
class WithDontTouchPorts extends OverrideIOBinder({
(system: DontTouch) => system.dontTouchPorts(); (Nil, Nil)
})
class WithNMITiedOff extends ComposeIOBinder({
(system: HasHierarchicalElementsRootContextModuleImp) => {
system.nmi.foreach { nmi =>
nmi.rnmi := false.B
nmi.rnmi_interrupt_vector := 0.U
nmi.rnmi_exception_vector := 0.U
}
(Nil, Nil)
}
})
class WithGCDBusyPunchthrough extends OverrideIOBinder({
(system: CanHavePeripheryGCD) => system.gcd_busy.map { busy =>
val io_gcd_busy = IO(Output(Bool()))
io_gcd_busy := busy
(Seq(GCDBusyPort(() => io_gcd_busy)), Nil)
}.getOrElse((Nil, Nil))
})
File ClockBinders.scala:
package chipyard.clocking
import chisel3._
import chisel3.util._
import chipyard.iobinders._
import freechips.rocketchip.prci._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.tilelink._
import chipyard.iocell._
// This uses the FakePLL, which uses a ClockAtFreq Verilog blackbox to generate
// the requested clocks. This also adds TileLink ClockDivider and ClockSelector
// blocks, which allow memory-mapped control of clock division, and clock muxing
// between the FakePLL and the slow off-chip clock
// Note: This will not simulate properly with firesim
// Unsetting enable will prevent the divider/selector from actually modifying the clock,
// while preserving the address map. Unsetting enable should only be done for RTL
// simulators (Verilator) which do not model reset properly
class WithPLLSelectorDividerClockGenerator(enable: Boolean = true) extends OverrideLazyIOBinder({
(system: HasChipyardPRCI) => {
// Connect the implicit clock
implicit val p = GetSystemParameters(system)
val tlbus = system.asInstanceOf[BaseSubsystem].locateTLBusWrapper(system.prciParams.slaveWhere)
val baseAddress = system.prciParams.baseAddress
val clockDivider = system.prci_ctrl_domain { LazyModule(new TLClockDivider (baseAddress + 0x20000, tlbus.beatBytes, enable=enable)) }
val clockSelector = system.prci_ctrl_domain { LazyModule(new TLClockSelector(baseAddress + 0x30000, tlbus.beatBytes, enable=enable)) }
val pllCtrl = system.prci_ctrl_domain { LazyModule(new FakePLLCtrl (baseAddress + 0x40000, tlbus.beatBytes)) }
clockDivider.tlNode := system.prci_ctrl_domain { TLFragmenter(tlbus, Some("ClockDivider")) := system.prci_ctrl_bus.get }
clockSelector.tlNode := system.prci_ctrl_domain { TLFragmenter(tlbus, Some("ClockSelector")) := system.prci_ctrl_bus.get }
pllCtrl.tlNode := system.prci_ctrl_domain { TLFragmenter(tlbus, Some("PLLCtrl")) := system.prci_ctrl_bus.get }
system.chiptopClockGroupsNode := clockDivider.clockNode := clockSelector.clockNode
// Connect all other requested clocks
val slowClockSource = ClockSourceNode(Seq(ClockSourceParameters()))
val pllClockSource = ClockSourceNode(Seq(ClockSourceParameters()))
// The order of the connections to clockSelector.clockNode configures the inputs
    // of the clockSelector's clockMux. The default is the slowClockSource;
    // software should enable the PLL, then switch to the pllClockSource
clockSelector.clockNode := slowClockSource
clockSelector.clockNode := pllClockSource
val pllCtrlSink = BundleBridgeSink[FakePLLCtrlBundle]()
pllCtrlSink := pllCtrl.ctrlNode
InModuleBody {
val clock_wire = Wire(Input(Clock()))
val reset_wire = Wire(Input(AsyncReset()))
val (clock_io, clockIOCell) = IOCell.generateIOFromSignal(clock_wire, "clock", p(IOCellKey))
val (reset_io, resetIOCell) = IOCell.generateIOFromSignal(reset_wire, "reset", p(IOCellKey))
slowClockSource.out.unzip._1.map { o =>
o.clock := clock_wire
o.reset := reset_wire
}
// For a real chip you should replace this ClockSourceAtFreqFromPlusArg
// with a blackbox of whatever PLL is being integrated
val fake_pll = Module(new ClockSourceAtFreqFromPlusArg("pll_freq_mhz"))
fake_pll.io.power := pllCtrlSink.in(0)._1.power
fake_pll.io.gate := pllCtrlSink.in(0)._1.gate
pllClockSource.out.unzip._1.map { o =>
o.clock := fake_pll.io.clk
o.reset := reset_wire
}
(Seq(ClockPort(() => clock_io, 100), ResetPort(() => reset_io)), clockIOCell ++ resetIOCell)
}
}
})
// This passes all clocks through to the TestHarness
class WithPassthroughClockGenerator extends OverrideLazyIOBinder({
(system: HasChipyardPRCI) => {
implicit val p = GetSystemParameters(system)
// This aggregate node should do nothing
val clockGroupAggNode = ClockGroupAggregateNode("fake")
val clockGroupsSourceNode = ClockGroupSourceNode(Seq(ClockGroupSourceParameters()))
system.chiptopClockGroupsNode := clockGroupAggNode := clockGroupsSourceNode
InModuleBody {
val reset_io = IO(Input(AsyncReset()))
require(clockGroupAggNode.out.size == 1)
val (bundle, edge) = clockGroupAggNode.out(0)
val clock_ios = (bundle.member.data zip edge.sink.members).map { case (b, m) =>
require(m.take.isDefined, s"""Clock ${m.name.get} has no requested frequency
|Clocks: ${edge.sink.members.map(_.name.get)}""".stripMargin)
val freq = m.take.get.freqMHz
val clock_io = IO(Input(Clock())).suggestName(s"clock_${m.name.get}")
b.clock := clock_io
b.reset := reset_io
ClockPort(() => clock_io, freq)
}.toSeq
((clock_ios :+ ResetPort(() => reset_io)), Nil)
}
}
})
// Broadcasts a single clock IO to all clock domains. Ignores all requested frequencies
class WithSingleClockBroadcastClockGenerator(freqMHz: Int = 100) extends OverrideLazyIOBinder({
(system: HasChipyardPRCI) => {
implicit val p = GetSystemParameters(system)
val clockGroupsAggregator = LazyModule(new ClockGroupAggregator("single_clock"))
val clockGroupsSourceNode = ClockGroupSourceNode(Seq(ClockGroupSourceParameters()))
system.chiptopClockGroupsNode :*= clockGroupsAggregator.node := clockGroupsSourceNode
InModuleBody {
val clock_wire = Wire(Input(Clock()))
val reset_wire = Wire(Input(AsyncReset()))
val (clock_io, clockIOCell) = IOCell.generateIOFromSignal(clock_wire, "clock", p(IOCellKey))
val (reset_io, resetIOCell) = IOCell.generateIOFromSignal(reset_wire, "reset", p(IOCellKey))
clockGroupsSourceNode.out.foreach { case (bundle, edge) =>
bundle.member.data.foreach { b =>
b.clock := clock_io
b.reset := reset_io
}
}
(Seq(ClockPort(() => clock_io, freqMHz), ResetPort(() => reset_io)), clockIOCell ++ resetIOCell)
}
}
})
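// Usage sketch (hypothetical): the three clock-generator binders above are all OverrideLazyIOBinders
// on HasChipyardPRCI, so a configuration selects exactly one of them, e.g.
//   new chipyard.clocking.WithPLLSelectorDividerClockGenerator ++ ...     // memory-mapped divider/selector + FakePLL
//   new chipyard.clocking.WithPassthroughClockGenerator ++ ...            // every requested clock driven from the harness
//   new chipyard.clocking.WithSingleClockBroadcastClockGenerator() ++ ... // one clock pin broadcast to all domains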
class WithClockTapIOCells extends OverrideIOBinder({
(system: CanHaveClockTap) => {
system.clockTapIO.map { tap =>
val (clock_tap_io, clock_tap_cell) = IOCell.generateIOFromSignal(tap.getWrappedValue, "clock_tap")
(Seq(ClockTapPort(() => clock_tap_io)), clock_tap_cell)
}.getOrElse((Nil, Nil))
}
})
| module ChipTop( // @[ChipTop.scala:33:44]
output uart_0_txd, // @[IOCell.scala:196:23]
input uart_0_rxd, // @[IOCell.scala:196:23]
output axi4_mem_0_clock, // @[IOBinders.scala:397:22]
input axi4_mem_0_bits_aw_ready, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_aw_valid, // @[IOBinders.scala:397:22]
output [3:0] axi4_mem_0_bits_aw_bits_id, // @[IOBinders.scala:397:22]
output [31:0] axi4_mem_0_bits_aw_bits_addr, // @[IOBinders.scala:397:22]
output [7:0] axi4_mem_0_bits_aw_bits_len, // @[IOBinders.scala:397:22]
output [2:0] axi4_mem_0_bits_aw_bits_size, // @[IOBinders.scala:397:22]
output [1:0] axi4_mem_0_bits_aw_bits_burst, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_aw_bits_lock, // @[IOBinders.scala:397:22]
output [3:0] axi4_mem_0_bits_aw_bits_cache, // @[IOBinders.scala:397:22]
output [2:0] axi4_mem_0_bits_aw_bits_prot, // @[IOBinders.scala:397:22]
output [3:0] axi4_mem_0_bits_aw_bits_qos, // @[IOBinders.scala:397:22]
input axi4_mem_0_bits_w_ready, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_w_valid, // @[IOBinders.scala:397:22]
output [63:0] axi4_mem_0_bits_w_bits_data, // @[IOBinders.scala:397:22]
output [7:0] axi4_mem_0_bits_w_bits_strb, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_w_bits_last, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_b_ready, // @[IOBinders.scala:397:22]
input axi4_mem_0_bits_b_valid, // @[IOBinders.scala:397:22]
input [3:0] axi4_mem_0_bits_b_bits_id, // @[IOBinders.scala:397:22]
input [1:0] axi4_mem_0_bits_b_bits_resp, // @[IOBinders.scala:397:22]
input axi4_mem_0_bits_ar_ready, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_ar_valid, // @[IOBinders.scala:397:22]
output [3:0] axi4_mem_0_bits_ar_bits_id, // @[IOBinders.scala:397:22]
output [31:0] axi4_mem_0_bits_ar_bits_addr, // @[IOBinders.scala:397:22]
output [7:0] axi4_mem_0_bits_ar_bits_len, // @[IOBinders.scala:397:22]
output [2:0] axi4_mem_0_bits_ar_bits_size, // @[IOBinders.scala:397:22]
output [1:0] axi4_mem_0_bits_ar_bits_burst, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_ar_bits_lock, // @[IOBinders.scala:397:22]
output [3:0] axi4_mem_0_bits_ar_bits_cache, // @[IOBinders.scala:397:22]
output [2:0] axi4_mem_0_bits_ar_bits_prot, // @[IOBinders.scala:397:22]
output [3:0] axi4_mem_0_bits_ar_bits_qos, // @[IOBinders.scala:397:22]
output axi4_mem_0_bits_r_ready, // @[IOBinders.scala:397:22]
input axi4_mem_0_bits_r_valid, // @[IOBinders.scala:397:22]
input [3:0] axi4_mem_0_bits_r_bits_id, // @[IOBinders.scala:397:22]
input [63:0] axi4_mem_0_bits_r_bits_data, // @[IOBinders.scala:397:22]
input [1:0] axi4_mem_0_bits_r_bits_resp, // @[IOBinders.scala:397:22]
input axi4_mem_0_bits_r_bits_last, // @[IOBinders.scala:397:22]
input custom_boot, // @[IOCell.scala:196:23]
input jtag_TCK, // @[IOCell.scala:196:23]
input jtag_TMS, // @[IOCell.scala:196:23]
input jtag_TDI, // @[IOCell.scala:196:23]
output jtag_TDO, // @[IOCell.scala:196:23]
output trace_traces_0_clock, // @[IOBinders.scala:482:21]
output trace_traces_0_reset, // @[IOBinders.scala:482:21]
output trace_traces_0_trace_insns_0_valid, // @[IOBinders.scala:482:21]
output [39:0] trace_traces_0_trace_insns_0_iaddr, // @[IOBinders.scala:482:21]
output [31:0] trace_traces_0_trace_insns_0_insn, // @[IOBinders.scala:482:21]
output [2:0] trace_traces_0_trace_insns_0_priv, // @[IOBinders.scala:482:21]
output trace_traces_0_trace_insns_0_exception, // @[IOBinders.scala:482:21]
output trace_traces_0_trace_insns_0_interrupt, // @[IOBinders.scala:482:21]
output [63:0] trace_traces_0_trace_insns_0_cause, // @[IOBinders.scala:482:21]
output [39:0] trace_traces_0_trace_insns_0_tval, // @[IOBinders.scala:482:21]
output [127:0] trace_traces_0_trace_insns_0_wdata, // @[IOBinders.scala:482:21]
output [63:0] trace_traces_0_trace_time, // @[IOBinders.scala:482:21]
input reset_io, // @[ClockBinders.scala:87:24]
input clock_uncore, // @[ClockBinders.scala:95:26]
output clock_tap, // @[IOCell.scala:196:23]
output serial_tl_0_in_ready, // @[IOCell.scala:196:23]
input serial_tl_0_in_valid, // @[IOCell.scala:196:23]
input [31:0] serial_tl_0_in_bits_phit, // @[IOCell.scala:196:23]
input serial_tl_0_out_ready, // @[IOCell.scala:196:23]
output serial_tl_0_out_valid, // @[IOCell.scala:196:23]
output [31:0] serial_tl_0_out_bits_phit, // @[IOCell.scala:196:23]
input serial_tl_0_clock_in // @[IOCell.scala:196:23]
);
wire _iocell_serial_tl_0_in_valid_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_31_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_30_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_29_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_28_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_27_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_26_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_25_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_24_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_23_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_22_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_21_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_20_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_19_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_18_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_17_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_16_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_15_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_14_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_13_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_12_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_11_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_10_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_9_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_8_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_7_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_6_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_5_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_4_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_3_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_2_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_1_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_in_bits_phit_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_out_ready_i; // @[IOCell.scala:176:23]
wire _iocell_serial_tl_0_out_bits_phit_31_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_30_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_29_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_28_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_27_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_26_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_25_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_24_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_23_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_22_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_21_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_20_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_19_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_18_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_17_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_16_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_15_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_14_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_13_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_12_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_11_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_10_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_9_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_8_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_7_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_6_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_5_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_4_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_3_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_2_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_1_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_out_bits_phit_pad; // @[IOCell.scala:177:24]
wire _iocell_serial_tl_0_clock_in_i; // @[IOCell.scala:176:23]
wire _iocell_jtag_TCK_i; // @[IOCell.scala:176:23]
wire _iocell_jtag_TMS_i; // @[IOCell.scala:176:23]
wire _iocell_jtag_TDI_i; // @[IOCell.scala:176:23]
wire _gated_clock_debug_clock_gate_out; // @[ClockGate.scala:36:20]
wire _dmactiveAck_dmactiveAck_io_q; // @[ShiftReg.scala:45:23]
wire _debug_reset_syncd_debug_reset_sync_io_q; // @[ShiftReg.scala:45:23]
wire _system_debug_systemjtag_reset_catcher_io_sync_reset; // @[ResetCatchAndSync.scala:39:28]
wire _iocell_custom_boot_i; // @[IOCell.scala:176:23]
wire _iocell_uart_0_rxd_i; // @[IOCell.scala:176:23]
wire _system_auto_cbus_fixedClockNode_anon_out_clock; // @[ChipTop.scala:27:35]
wire _system_auto_cbus_fixedClockNode_anon_out_reset; // @[ChipTop.scala:27:35]
wire _system_debug_systemjtag_jtag_TDO_data; // @[ChipTop.scala:27:35]
wire _system_debug_dmactive; // @[ChipTop.scala:27:35]
wire _system_serial_tl_0_in_ready; // @[ChipTop.scala:27:35]
wire _system_serial_tl_0_out_valid; // @[ChipTop.scala:27:35]
wire [31:0] _system_serial_tl_0_out_bits_phit; // @[ChipTop.scala:27:35]
wire _system_uart_0_txd; // @[ChipTop.scala:27:35]
wire _system_clock_tap; // @[ChipTop.scala:27:35]
wire debug_reset = ~_debug_reset_syncd_debug_reset_sync_io_q; // @[ShiftReg.scala:45:23]
reg clock_en; // @[Periphery.scala:298:29]
always @(posedge _system_auto_cbus_fixedClockNode_anon_out_clock or posedge debug_reset) begin // @[Periphery.scala:290:40]
if (debug_reset) // @[Periphery.scala:290:40]
clock_en <= 1'h1; // @[Periphery.scala:298:29]
else // @[ChipTop.scala:27:35]
clock_en <= _dmactiveAck_dmactiveAck_io_q; // @[ShiftReg.scala:45:23]
always @(posedge, posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File LoopConv.scala:
package gemmini
import chisel3._
import chisel3.util._
import chisel3.experimental._
import freechips.rocketchip.tile.RoCCCommand
import org.chipsalliance.cde.config.Parameters
import GemminiISA._
import LocalAddr._
import Util._
class LoopConvOuterBounds(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int) extends Bundle {
val batch_size = UInt(large_iterator_bitwidth.W)
val in_row_dim = UInt(small_iterator_bitwidth.W)
val in_col_dim = UInt(small_iterator_bitwidth.W)
val in_channels = UInt(large_iterator_bitwidth.W)
val out_channels = UInt(large_iterator_bitwidth.W)
val out_col_dim = UInt(large_iterator_bitwidth.W)
val out_row_dim = UInt(large_iterator_bitwidth.W)
val out_stride = UInt(large_iterator_bitwidth.W) //stride for output activation
val in_stride = UInt(large_iterator_bitwidth.W) //stride for input activation
val weight_stride = UInt(large_iterator_bitwidth.W) //stride for weight
val pool_out_row_dim = UInt(small_iterator_bitwidth.W)
val pool_out_col_dim = UInt(small_iterator_bitwidth.W)
val stride = UInt(tiny_iterator_bitwidth.W)
val padding = UInt(tiny_iterator_bitwidth.W)
val kernel_dim = UInt(tiny_iterator_bitwidth.W)
val kernel_dilation = UInt(tiny_iterator_bitwidth.W)
val pool_size = UInt(tiny_iterator_bitwidth.W)
val pool_stride = UInt(tiny_iterator_bitwidth.W)
val pool_padding = UInt(tiny_iterator_bitwidth.W)
}
class LoopConvInnerBounds(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int) extends Bundle {
val batches = UInt(large_iterator_bitwidth.W)
val porows = UInt(small_iterator_bitwidth.W)
val pocols = UInt(small_iterator_bitwidth.W)
val pochs = UInt(large_iterator_bitwidth.W)
val krows = UInt(tiny_iterator_bitwidth.W)
val kcols = UInt(tiny_iterator_bitwidth.W)
val kchs = UInt(large_iterator_bitwidth.W)
val lpad = UInt(tiny_iterator_bitwidth.W)
val rpad = UInt(tiny_iterator_bitwidth.W)
val upad = UInt(tiny_iterator_bitwidth.W)
val dpad = UInt(tiny_iterator_bitwidth.W)
val plpad = UInt(tiny_iterator_bitwidth.W)
val prad = UInt(tiny_iterator_bitwidth.W)
val pupad = UInt(tiny_iterator_bitwidth.W)
val pdpad = UInt(tiny_iterator_bitwidth.W)
val orows = UInt(small_iterator_bitwidth.W)
val ocols = UInt(small_iterator_bitwidth.W)
}
class LoopConvDerivedParams(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int) extends Bundle {
val ochs = UInt(large_iterator_bitwidth.W)
val irows = UInt(small_iterator_bitwidth.W)
val icols = UInt(small_iterator_bitwidth.W)
val irows_unpadded = UInt(small_iterator_bitwidth.W)
val icols_unpadded = UInt(small_iterator_bitwidth.W)
val ichs = UInt(large_iterator_bitwidth.W)
val out_channels_per_bank = UInt(small_iterator_bitwidth.W) // TODO this won't work for systolic arrays above 256 in size
val in_channels_per_bank = UInt(small_iterator_bitwidth.W) // TODO this won't work for systolic arrays above 256 in size
val bias_spad_stride = UInt(large_iterator_bitwidth.W)
val input_spad_stride = UInt(large_iterator_bitwidth.W)
val weight_spad_stride = UInt(large_iterator_bitwidth.W)
// val ex_overwrite = Bool()
}
class LoopConvLdBiasReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_start = UInt(log2Up(max_acc_addr).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val no_bias = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvLdBias(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_acc_addr: Int, acc_w: Int,
max_block_len_acc: Int, concurrent_loops: Int, latency: Int,
config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2)(implicit p: Parameters) extends Module {
val MVIN_SCALE_IDENTITY = 0x3f800000.U // TODO get this from configs somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvLdBiasReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth: Int, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val wait_for_prev_loop = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvLdBiasReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth: Int, max_acc_addr, concurrent_loops))
import req.inner_bounds._
import req.derived_params._
val acc_addr_start = req.addr_start
// Derived parameters
val max_ochs_per_mvin = Mux(ochs < (max_block_len_acc * block_size).U, ochs, (max_block_len_acc * block_size).U)
val skip = req.dram_addr === 0.U
// Iterators
val b = Reg(UInt(large_iterator_bitwidth.W))
val orow = Reg(UInt(small_iterator_bitwidth.W))
val ocol = Reg(UInt(small_iterator_bitwidth.W))
val och = Reg(UInt(large_iterator_bitwidth.W))
// Addresses
val dram_offset = och * (acc_w/8).U
val dram_addr = Mux(req.no_bias, 0.U, req.dram_addr + LoopConv.castDramOffset(dram_offset))
val spad_addr = acc_addr_start +& (och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
// Sizes
val I = Mux(ocols - ocol > block_size.U, block_size.U, ocols - ocol)
val J = Mux(ochs - och > max_ochs_per_mvin, max_ochs_per_mvin, ochs - och)
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = UInt()
val I = UInt()
val J = UInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_mvin_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.scale := MVIN_SCALE_IDENTITY
config_cmd_rs1.stride := req.derived_params.bias_spad_stride
config_cmd_rs1.pixel_repeats := 1.U
config_cmd_rs1.state_id := 2.U
config_cmd_rs1.shrink := 0.U
config_cmd_rs1._unused := 1.U
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := 0.U
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD3_CMD
mvin_cmd.rs1 := 0.U
mvin_cmd.rs2 := 0.U
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !io.wait_for_prev_loop && !skip
command_p.io.in.bits.cmd := Mux(state === config, config_cmd, mvin_cmd)
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.I := I
command_p.io.in.bits.J := J
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === LOAD3_CMD) {
val o = command_p.io.out.bits
io.cmd.bits.rs1 := o.dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := o.I.asUInt
mvin_cmd_rs2.num_cols := o.J.asUInt
mvin_cmd_rs2.local_addr := cast_to_acc_addr(mvin_cmd_rs2.local_addr, o.spad_addr, accumulate = false.B, read_full = false.B)
io.cmd.bits.rs2 := mvin_cmd_rs2.asUInt
}
// Sending outputs
when (skip) {
state := idle
}.elsewhen(command_p.io.in.fire) {
when (state === config) {
state := ld
}.otherwise {
val next_och = floorAdd(och, max_ochs_per_mvin, ochs)
val next_ocol = floorAdd(ocol, block_size.U, ocols, next_och === 0.U)
val next_orow = floorAdd(orow, 1.U, orows, next_ocol === 0.U && next_och === 0.U)
val next_b = floorAdd(b, 1.U, batches, next_orow === 0.U && next_ocol === 0.U && next_och === 0.U)
och := next_och
ocol := next_ocol
orow := next_orow
b := next_b
state := Mux(next_b === 0.U && next_orow === 0.U && next_ocol === 0.U && next_och === 0.U,
idle, ld)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := config
b := 0.U
orow := 0.U
ocol := 0.U
och := 0.U
}
}
class LoopConvLdInputReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_start = UInt(log2Up(max_acc_addr).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val downsample = Bool()
val max_pixels_per_row = UInt(small_iterator_bitwidth.W)
val input_dilated = Bool()
val trans_input_3120 = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvLdInput(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int,
tiny_iterator_bitwidth: Int, max_addr: Int, input_w: Int, max_block_len: Int,
concurrent_loops: Int, latency: Int, config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2)
(implicit p: Parameters) extends Module {
val MVIN_SCALE_IDENTITY = 0x3f800000.U // TODO get this from configs somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvLdInputReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val wait_for_prev_loop = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvLdInputReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
def undilated(x: UInt): UInt = (x +& req.input_dilated) >> req.input_dilated
// Derived parameters
val max_ichs_per_mvin = Mux(ichs < (max_block_len * block_size).U, ichs, (max_block_len * block_size).U).zext
val max_batches_per_mvin = Mux(batches < (max_block_len * block_size).U, batches, (max_block_len * block_size).U).zext
val max_chs_per_mvin = Mux(req.trans_input_3120, max_batches_per_mvin, max_ichs_per_mvin)
// Iterators
val b = Reg(SInt(large_iterator_bitwidth.W))
val irow = Reg(SInt(small_iterator_bitwidth.W))
val icol = Reg(SInt(small_iterator_bitwidth.W))
val ich = Reg(SInt(large_iterator_bitwidth.W))
// Calculated params
val irow_padded = irow +& undilated(upad).zext
val icol_padded = icol +& undilated(lpad).zext
val is_zeros = irow < 0.S || irow >= irows_unpadded.zext || icol < 0.S || icol >= icols_unpadded.zext
val dram_stride = Mux(req.trans_input_3120, batch_size * (input_w/8).U, in_stride * (input_w/8).U)
// Addresses
val dram_offset = Mux(req.trans_input_3120, (((ich * in_col_dim * in_row_dim +& irow*in_col_dim +& icol) * batches +& b) * (input_w/8).U).asUInt,
(((b * in_row_dim * in_col_dim +& irow*in_col_dim +& icol) * in_stride +& ich) * (input_w/8).U).asUInt)
val dram_addr = Mux(is_zeros, 0.U, req.dram_addr + LoopConv.castDramOffset(dram_offset))
val spad_addr = Mux(req.trans_input_3120,
// To prevent Verilator errors, we replace some "/ block_size.U" calls here with ">> log2Up(block_size)"
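    // (for the non-negative values used here, dividing by the power-of-two block_size and
    //  shifting right by log2Up(block_size) produce the same result)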
req.addr_start.zext +& (b >> log2Up(block_size)) * input_spad_stride +& ich * (irows >> req.downsample) * (icols >> req.downsample) +& (irow_padded >> req.downsample) * (icols >> req.downsample) +& (icol_padded >> req.downsample),
req.addr_start.zext +& (ich >> log2Up(block_size)) * input_spad_stride +& b * (irows >> req.downsample) * (icols >> req.downsample) +& (irow_padded >> req.downsample) * (icols >> req.downsample) +& (icol_padded >> req.downsample))
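// The scratchpad address above packs block_size input channels per scratchpad row: blocks of
// channels are separated by input_spad_stride rows, and within a block the order is
// batch -> padded row -> padded column (batch and channel swap roles when trans_input_3120 is set).
// Row and column extents are halved when downsampling.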
// Sizes
val block_size_downsampled = (block_size.U << req.downsample).asUInt.zext
val I = MuxCase(
Mux(icols_unpadded.zext -& icol > block_size_downsampled, block_size_downsampled, icols_unpadded.zext -& icol),
Seq(
(icol < 0.S) -> Mux((0.S-&icol) > block_size.S, block_size.S, 0.S-&icol),
(icol >= icols_unpadded.zext) -> Mux(icols_unpadded.zext +& undilated(rpad).zext -& icol > block_size.S, block_size.S, icols_unpadded.zext +& undilated(rpad).zext -& icol)
)
)
val K = Mux(req.trans_input_3120,
Mux(batches.zext -& b > max_chs_per_mvin, max_chs_per_mvin, batches.zext -& b),
Mux(ichs.zext -& ich > max_chs_per_mvin, max_chs_per_mvin, ichs.zext -& ich))
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = SInt()
val I = SInt()
val K = SInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
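// command_p buffers each generated command for `latency` pipeline stages before it reaches io.cmd.
// The raw DRAM/scratchpad addresses and block sizes travel alongside the command so that rs1/rs2
// are only packed into their final encodings at the pipeline output.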
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_mvin_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.scale := MVIN_SCALE_IDENTITY
config_cmd_rs1.stride := input_spad_stride
config_cmd_rs1.pixel_repeats := req.max_pixels_per_row
config_cmd_rs1.state_id := 0.U
config_cmd_rs1.shrink := 0.U
config_cmd_rs1._unused := 1.U
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := dram_stride << req.downsample
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD_CMD
mvin_cmd.rs1 := 0.U // dram_addr
mvin_cmd.rs2 := 0.U // mvin_cmd_rs2
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !io.wait_for_prev_loop && (req.dram_addr =/= 0.U)
command_p.io.in.bits.cmd := Mux(state === config, config_cmd, mvin_cmd)
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.I := I
command_p.io.in.bits.K := K
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === LOAD_CMD) {
val o = command_p.io.out.bits
io.cmd.bits.rs1 := o.dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := (o.I >> req.downsample).asUInt
mvin_cmd_rs2.num_cols := o.K.asUInt
mvin_cmd_rs2.local_addr := cast_to_sp_addr(mvin_cmd_rs2.local_addr, o.spad_addr)
io.cmd.bits.rs2 := mvin_cmd_rs2.asUInt
}
// Sending outputs
when(req.dram_addr === 0.U){
state := idle
}.elsewhen(command_p.io.in.fire) {
when (state === config) {
state := ld
}.otherwise {
val b_it = Mux(req.trans_input_3120, max_chs_per_mvin.asUInt, 1.U)
val ich_it = Mux(req.trans_input_3120, 1.U, max_chs_per_mvin.asUInt)
val next_ich = sFloorAdd(ich, ich_it, ichs.zext, 0.S)
val next_icol = sFloorAdd(icol, I.asUInt, (icols_unpadded +& undilated(rpad)).zext, 0.S-&undilated(lpad).zext,
next_ich === 0.S)
val next_irow = sFloorAdd(irow, 1.U << req.downsample, (irows_unpadded +& undilated(dpad)).zext, 0.S-&undilated(upad).zext,
next_icol === 0.S-&undilated(lpad).zext && next_ich === 0.S)
val next_b = sFloorAdd(b, b_it, batches.zext, 0.S,
next_irow === 0.S-&undilated(upad).zext && next_icol === 0.S-&undilated(lpad).zext && next_ich === 0.S)
ich := next_ich
icol := next_icol
irow := next_irow
b := next_b
state := Mux(next_b === 0.S && next_irow === 0.S-&undilated(upad).zext && next_icol === 0.S-&undilated(lpad).zext && next_ich === 0.S,
idle, ld)
}
}
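// Iteration order of the loads above: ich is innermost, then icol, irow, and b outermost. irow and
// icol start from the negative padding offsets, so blocks that fall in the padded region are issued
// with dram_addr forced to 0 (the zero-padding case). When every iterator wraps back to its starting
// value, the FSM returns to idle.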
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := config
b := 0.S
irow := 0.S -& ((io.req.bits.inner_bounds.upad +& io.req.bits.input_dilated) >> io.req.bits.input_dilated).zext
icol := 0.S -& ((io.req.bits.inner_bounds.lpad +& io.req.bits.input_dilated) >> io.req.bits.input_dilated).zext
ich := 0.S
}
}
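// LoopConvLdWeight unrolls the "mvin" traffic for the kernel weights (the "B" operand). It issues a
// CONFIG_CMD for state-id 1 followed by LOAD2_CMD mvins, placing the weights at the top of the B
// region: addr_start counts B_rows backwards from the requested addr_end.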
class LoopConvLdWeightReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_end = UInt(log2Up(max_addr+1).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val trans_weight_1203 = Bool()
val trans_weight_0132 = Bool()
val dw = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvLdWeight(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int,
small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_addr: Int, input_w: Int,
max_block_len: Int, concurrent_loops: Int, latency: Int, config_mvin_rs1_t: ConfigMvinRs1,
mvin_rs2_t: MvinRs2)(implicit p: Parameters) extends Module {
val MVIN_SCALE_IDENTITY = 0x3f800000.U // TODO get this from configs somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvLdWeightReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val wait_for_prev_loop = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvLdWeightReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
// Derived parameters
val max_chs_per_mvin = {
val max_ochs_per_mvin = Mux(ochs < (max_block_len * block_size).U, ochs, (max_block_len * block_size).U)
val max_kchs_per_mvin = Mux(kchs < (max_block_len * block_size).U, kchs, (max_block_len * block_size).U)
Mux(req.trans_weight_0132, max_kchs_per_mvin, max_ochs_per_mvin)
}
val B_rows = Mux(req.trans_weight_0132, in_channels_per_bank * kcols * krows * ochs,
out_channels_per_bank * kcols * krows * kchs)
val addr_start = req.addr_end - B_rows
val dram_stride = MuxCase(weight_stride, Seq(
req.dw -> 1.U,
req.trans_weight_1203 -> (kernel_dim * kernel_dim * out_channels),
req.trans_weight_0132 -> in_channels
)) * (input_w/8).U
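// dram_stride is the byte distance between successive weight rows in DRAM: one element for
// depthwise (dw) kernels, kernel_dim * kernel_dim * out_channels for the 1203-transposed layout,
// in_channels for the 0132-transposed layout, and weight_stride otherwise.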
// Iterators
val och = Reg(UInt(large_iterator_bitwidth.W))
val krow = Reg(UInt(tiny_iterator_bitwidth.W))
val kcol = Reg(UInt(tiny_iterator_bitwidth.W))
val kch = Reg(UInt(large_iterator_bitwidth.W))
// Addresses
val dram_offset = MuxCase(((krow*kernel_dim*in_channels +& kcol*in_channels +& kch) * weight_stride +& och) * (input_w/8).U, Seq(
req.dw -> (krow * kernel_dim +& kcol) * (input_w/8).U,
req.trans_weight_1203 -> (((kch*kernel_dim*kernel_dim +& krow*kernel_dim +& kcol) * out_channels +& och) * (input_w/8).U),
req.trans_weight_0132 -> (((krow*kernel_dim*out_channels +& kcol*out_channels +& och) * in_channels +& kch) * (input_w/8).U)
))
val dram_addr = req.dram_addr + LoopConv.castDramOffset(dram_offset)
val spad_addr = Mux(req.trans_weight_0132,
// The width expansions are added here solely to prevent Verilator's "WIDTH" warnings, despite making the code uglier
addr_start + (kch / block_size.U(kch.getWidth.W)) * krows * kcols * ochs + krow * kcols * ochs + kcol * ochs + och,
addr_start + (och / block_size.U(och.getWidth.W)) * krows * kcols * kchs + krow * kcols * kchs + kcol * kchs + kch)
// Sizes
val J = Mux(req.trans_weight_0132,
Mux(kchs - kch > max_chs_per_mvin, max_chs_per_mvin, kchs - kch),
Mux(ochs - och > max_chs_per_mvin, max_chs_per_mvin, ochs - och))
val K = Mux(req.trans_weight_0132,
Mux(ochs - och > block_size.U, block_size.U, ochs - och),
Mux(kchs - kch > block_size.U, block_size.U, kchs - kch))
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = UInt()
val K = UInt()
val J = UInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_mvin_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.scale := MVIN_SCALE_IDENTITY
config_cmd_rs1.stride := req.derived_params.weight_spad_stride
config_cmd_rs1.pixel_repeats := 1.U
config_cmd_rs1.state_id := 1.U
config_cmd_rs1.shrink := 0.U
config_cmd_rs1._unused := 1.U
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := dram_stride
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD2_CMD
mvin_cmd.rs1 := 0.U // dram_addr
mvin_cmd.rs2 := 0.U // mvin_cmd_rs2
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !io.wait_for_prev_loop && (req.dram_addr =/= 0.U)
command_p.io.in.bits.cmd := Mux(state === config, config_cmd, mvin_cmd)
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.K := K
command_p.io.in.bits.J := J
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === LOAD2_CMD) {
val o = command_p.io.out.bits
io.cmd.bits.rs1 := o.dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := o.K
mvin_cmd_rs2.num_cols := o.J
mvin_cmd_rs2.local_addr := cast_to_sp_addr(mvin_cmd_rs2.local_addr, o.spad_addr)
io.cmd.bits.rs2 := mvin_cmd_rs2.asUInt
}
// Sending outputs
when(req.dram_addr === 0.U){
state := idle
}.elsewhen(command_p.io.in.fire) {
when (state === config) {
state := ld
}.otherwise {
val och_it = Mux(req.trans_weight_0132, block_size.U, max_chs_per_mvin)
val kch_it = Mux(req.trans_weight_0132, max_chs_per_mvin, block_size.U)
val next_kch = floorAdd(kch, kch_it, kchs)
val next_kcol = floorAdd(kcol, 1.U, kcols, next_kch === 0.U)
val next_krow = floorAdd(krow, 1.U, krows, next_kcol === 0.U && next_kch === 0.U)
val next_och = floorAdd(och, och_it, ochs, next_krow === 0.U && next_kcol === 0.U && next_kch === 0.U)
kch := next_kch
kcol := next_kcol
krow := next_krow
och := next_och
state := Mux(next_och === 0.U && next_krow === 0.U && next_kcol === 0.U && next_kch === 0.U,
idle, ld)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := config
kch := 0.U
kcol := 0.U
krow := 0.U
och := 0.U
}
}
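// LoopConvExecute walks the output space and issues the PRELOAD / COMPUTE command pairs that drive
// the systolic array (plus an initial CONFIG_CMD to set the A and C strides when trans_input_3120).
// It only advances once the bias, input and weight loads it depends on have completed, as signalled
// through lda/ldb/ldd_completed.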
class LoopConvExecuteReq(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_addr: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val a_addr_start = UInt(log2Up(max_addr).W)
val b_addr_end = UInt(log2Up(max_addr+1).W)
val c_addr_start = UInt(log2Up(max_acc_addr).W)
val wrot180 = Bool()
val downsample = Bool()
val max_pixels_per_row = UInt(small_iterator_bitwidth.W)
val input_dilated = Bool()
val trans_weight_0132 = Bool()
val trans_input_3120 = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvExecute(block_size: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_addr: Int,
max_acc_addr: Int, concurrent_loops: Int, latency: Int,
config_ex_rs1_t: ConfigExRs1, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvExecuteReq(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val lda_completed = Input(Bool())
val ldb_completed = Input(Bool())
val ldd_completed = Input(Bool())
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, pre, comp = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvExecuteReq(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth,
max_addr, max_acc_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
def undilated(x: UInt): UInt = (x +& req.input_dilated) >> req.input_dilated
// Derived parameters
val B_rows = Mux(req.trans_weight_0132, in_channels_per_bank * kcols * krows * ochs,
out_channels_per_bank * kcols * krows * kchs)
val a_addr_start = req.a_addr_start
val b_addr_start = req.b_addr_end - B_rows
val c_addr_start = /*(BigInt(3) << 30).U |*/ req.c_addr_start
// Iterators
val och = Reg(UInt(large_iterator_bitwidth.W))
val krow = Reg(UInt(tiny_iterator_bitwidth.W))
val kcol = Reg(UInt(tiny_iterator_bitwidth.W))
val kch = Reg(UInt(large_iterator_bitwidth.W))
val b = Reg(UInt(large_iterator_bitwidth.W))
val orow = Reg(UInt(small_iterator_bitwidth.W))
val ocol = Reg(UInt(small_iterator_bitwidth.W))
// TODO kernel-dilation and input-dilation can never be activated at the same time, so we can optimize out some multiplications by kernel_dilation
val skip_iteration = state >= pre && req.input_dilated && (((krow * kernel_dilation +& orow -& upad)(0) & req.input_dilated).asBool ||
((kcol * kernel_dilation +& ocol -& lpad)(0) & req.input_dilated).asBool)
val pixels = Mux(kcols - kcol > req.max_pixels_per_row, req.max_pixels_per_row, kcols - kcol)
val irow = undilated(orow * stride +& krow * kernel_dilation)
val icol = undilated(ocol * stride +& kcol * kernel_dilation)
val I = Mux(req.trans_input_3120,
Mux(batches - b > block_size.U, block_size.U, batches - b),
undilated(Mux(ocols - ocol > (block_size.U << req.input_dilated).asUInt, (block_size.U << req.input_dilated).asUInt, ocols - ocol)))
val J = Mux(ochs - och > block_size.U, block_size.U, ochs - och)
val K = pixels * Mux(kchs - kch > block_size.U, block_size.U, kchs - kch)
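// Per-tile dimensions handed to the systolic array: I is the number of output columns handled by
// this block (or batches when trans_input_3120), J is the number of output channels, and K is the
// number of input channels, scaled by the number of kernel pixels folded into one row when the
// first-layer max_pixels_per_row optimization is active.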
// Addresses
val a_addr = Mux(req.trans_input_3120,
a_addr_start +& (b / block_size.U) * input_spad_stride +& kch * (irows >> req.downsample) * (icols >> req.downsample) +& (irow >> req.downsample) * (icols >> req.downsample) +& (icol >> req.downsample),
a_addr_start +& (kch / block_size.U(kch.getWidth.W)) * input_spad_stride +& b * (irows >> req.downsample) * (icols >> req.downsample) +& (irow >> req.downsample) * (icols >> req.downsample) +& (icol >> req.downsample))
// val c_addr = Mux(ex_overwrite && krow === 0.U && kcol === 0.U && kch === 0.U, d_addr_start, c_addr_start) +&
// (och / block_size.U) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
// The width expansions are added here solely to prevent Verilator's "WIDTH" warnings, despite making the code uglier
val c_addr = c_addr_start +&
(och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
// val new_weights = b === 0.U && orow === 0.U && ocol === 0.U
val new_weights = Reg(Bool())
val krow_rot = Mux(req.wrot180, krows - krow - 1.U, krow)
val kcol_rot = Mux(req.wrot180, kcols - kcol - 1.U, kcol)
val b_addr = Mux(req.trans_weight_0132,
b_addr_start +& (kch / block_size.U(och.getWidth.W)) * krows * kcols * ochs +& krow_rot * kcols * ochs +& kcol_rot * ochs +& och,
b_addr_start +& (och / block_size.U(och.getWidth.W)) * krows * kcols * kchs +& krow_rot * kcols * kchs +& kcol_rot * kchs +& kch)
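// When wrot180 is set (training convolutions), the kernel is traversed with krow/kcol flipped,
// i.e. the filters are rotated by 180 degrees. b_addr then selects the weight block within the B
// region laid out by LoopConvLdWeight, blocked by och (or by kch when trans_weight_0132).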
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val a_addr = UInt()
val b_addr = UInt()
val c_addr = UInt()
val I = UInt()
val J = UInt()
val K = UInt()
val new_weights = Bool()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_ex_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.a_stride := (irows * icols).asUInt
config_cmd_rs1.set_only_strides := 1.U
config_cmd_rs1.cmd_type := 0.U
val config_cmd_rs2 = Wire(new ConfigExRs2)
config_cmd_rs2 := DontCare
config_cmd_rs2.c_stride := (orows * ocols).asUInt
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := config_cmd_rs2.asUInt
val pre_cmd = Wire(new RoCCCommand) // preload
pre_cmd := DontCare
pre_cmd.inst.funct := PRELOAD_CMD
pre_cmd.rs1 := 0.U//(K << 48) | (J << 32) | pre_addr
pre_cmd.rs2 := 0.U//(I << 48) | (J << 32) | c_addr
val comp_cmd = Wire(new RoCCCommand()) // compute.preloaded
comp_cmd := DontCare
comp_cmd.inst.funct := Mux(new_weights, COMPUTE_AND_FLIP_CMD, COMPUTE_AND_STAY_CMD)
comp_cmd.rs1 := 0.U//(I << 48) | (K << 32) | a_addr
comp_cmd.rs2 := 0.U//(I << 48) | (J << 32) | GARBAGE_ADDR
val ld_ahead = io.lda_completed && io.ldb_completed && io.ldd_completed
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !skip_iteration && ld_ahead
command_p.io.in.bits.cmd := MuxCase(config_cmd, Seq((state === pre) -> pre_cmd, (state === comp) -> comp_cmd))
command_p.io.in.bits.a_addr := a_addr
command_p.io.in.bits.b_addr := b_addr
command_p.io.in.bits.c_addr := c_addr
command_p.io.in.bits.I := I
command_p.io.in.bits.J := J
command_p.io.in.bits.K := K
command_p.io.in.bits.new_weights := new_weights
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === PRELOAD_CMD) {
val o = command_p.io.out.bits
val pre_cmd_rs1 = Wire(preload_rs1_t.cloneType)
pre_cmd_rs1 := DontCare
pre_cmd_rs1.num_rows := o.K.asUInt
pre_cmd_rs1.num_cols := o.J.asUInt
pre_cmd_rs1.local_addr := Mux(o.new_weights, cast_to_sp_addr(pre_cmd_rs1.local_addr, o.b_addr),
garbage_addr(pre_cmd_rs1.local_addr))
val pre_cmd_rs2 = Wire(preload_rs2_t.cloneType)
pre_cmd_rs2 := DontCare
pre_cmd_rs2.num_rows := o.I.asUInt
pre_cmd_rs2.num_cols := o.J.asUInt
pre_cmd_rs2.local_addr := cast_to_acc_addr(pre_cmd_rs2.local_addr, o.c_addr, accumulate = true.B, read_full = false.B)
io.cmd.bits.rs1 := pre_cmd_rs1.asUInt
io.cmd.bits.rs2 := pre_cmd_rs2.asUInt
}.elsewhen(command_p.io.out.bits.cmd.inst.funct =/= CONFIG_CMD) {
val o = command_p.io.out.bits
val comp_cmd_rs1 = Wire(compute_rs1_t.cloneType)
comp_cmd_rs1 := DontCare
comp_cmd_rs1.num_rows := o.I.asUInt
comp_cmd_rs1.num_cols := o.K.asUInt
comp_cmd_rs1.local_addr := cast_to_sp_addr(comp_cmd_rs1.local_addr, o.a_addr)
val comp_cmd_rs2 = Wire(compute_rs2_t.cloneType)
comp_cmd_rs2 := DontCare
comp_cmd_rs2.num_rows := o.I.asUInt
comp_cmd_rs2.num_cols := o.J.asUInt
comp_cmd_rs2.local_addr := garbage_addr(comp_cmd_rs2.local_addr)
io.cmd.bits.rs1 := comp_cmd_rs1.asUInt
io.cmd.bits.rs2 := comp_cmd_rs2.asUInt
}
// Updating "new_weights"
when (state === comp && command_p.io.in.fire) {
new_weights := false.B
}
// Sending outputs
when (command_p.io.in.fire || skip_iteration) {
when (state === config) {
state := pre
}.elsewhen (state === pre) {
state := comp
}.otherwise {
val b_it = Mux(req.trans_input_3120, block_size.U, 1.U)
val ocol_it = Mux(skip_iteration || req.trans_input_3120, 1.U, block_size.U << req.input_dilated).asUInt
val next_ocol = floorAdd(ocol, ocol_it, ocols)
val next_orow = floorAdd(orow, 1.U, orows, next_ocol === 0.U)
val next_b = floorAdd(b, b_it, batches, next_orow === 0.U && next_ocol === 0.U)
val next_kch = floorAdd(kch, block_size.U, kchs,
next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
val next_kcol = floorAdd(kcol, req.max_pixels_per_row, kcols,
next_kch === 0.U && next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
val next_krow = floorAdd(krow, 1.U, krows,
next_kcol === 0.U && next_kch === 0.U && next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
val next_och = floorAdd(och, block_size.U, ochs, next_krow === 0.U &&
next_kcol === 0.U && next_kch === 0.U && next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
ocol := next_ocol
orow := next_orow
b := next_b
kch := next_kch
kcol := next_kcol
krow := next_krow
och := next_och
when (next_b === 0.U && next_orow === 0.U && next_ocol === 0.U) {
new_weights := true.B
}
state := Mux(next_och === 0.U && next_krow === 0.U && next_kcol === 0.U && next_kch === 0.U && next_b === 0.U &&
next_orow === 0.U && next_ocol === 0.U,
idle, pre)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := Mux(io.req.bits.trans_input_3120, config, pre)
b := 0.U
orow := 0.U
ocol := 0.U
och := 0.U
krow := 0.U
kcol := 0.U
kch := 0.U
new_weights := true.B
}
}
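// LoopConvSt unrolls the "mvout" traffic that writes accumulator results back to DRAM. With no_pool
// set it stores plain up-to-block_size x block_size tiles of the output; otherwise it programs the
// max-pooling store (pre_pool_config), issues one pooled STORE_CMD per (batch, output-channel block),
// and finally restores the non-pooled store configuration (post_pool_config).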
class LoopConvStReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_start = UInt(log2Up(max_acc_addr).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val no_pool = Bool()
val activation = UInt(2.W) // TODO magic number
val trans_output_1203 = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvSt(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_acc_addr: Int, input_w: Int, concurrent_loops: Int, latency: Int, config_mvout_rs2_t: ConfigMvoutRs2, mvout_rs2_t: MvoutRs2)(implicit p: Parameters) extends Module {
val ACC_SCALE_NO_CHANGE = ~(0.U(32.W)) // TODO get this from ISA description somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvStReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val ex_completed = Input(Bool())
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, st, pre_pool_config, pool, post_pool_config = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvStReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
val acc_addr_start = req.addr_start
// Derived parameters
val skip = req.dram_addr === 0.U
// Iterators
val b = Reg(UInt(large_iterator_bitwidth.W))
val orow = Reg(UInt(small_iterator_bitwidth.W))
val ocol = Reg(UInt(small_iterator_bitwidth.W))
val och = Reg(UInt(large_iterator_bitwidth.W))
// Addresses
val dram_offset = Mux(req.trans_output_1203,
((orow*out_col_dim*batch_size +& ocol*batch_size +& b) * out_channels +& och) * (input_w/8).U,
((b*out_row_dim*out_col_dim +& orow*out_col_dim +& ocol) * out_stride +& och) * (input_w/8).U)
val dram_addr = req.dram_addr + LoopConv.castDramOffset(dram_offset)
val spad_addr = acc_addr_start +& (och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
val pool_dram_addr = req.dram_addr + ((b * pool_out_col_dim * pool_out_row_dim) * out_stride + och) * (input_w/8).U
val pool_spad_addr = acc_addr_start +& (och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols
// Sizes
val I = Mux(ocols - ocol > block_size.U, block_size.U, ocols - ocol)
val J = Mux(ochs - och > block_size.U, block_size.U, ochs - och)
val channels = J
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = UInt()
val pool_dram_addr = UInt()
val pool_spad_addr = UInt()
val channels = UInt()
val is_pool = Bool()
val I = UInt()
val J = UInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val mvout_cmd = Wire(new RoCCCommand)
mvout_cmd := DontCare
mvout_cmd.inst.funct := STORE_CMD
mvout_cmd.rs1 := 0.U // dram_addr
mvout_cmd.rs2 := 0.U // mvout_cmd_rs2
val pre_pool_config_cmd = Wire(new RoCCCommand)
pre_pool_config_cmd := DontCare
pre_pool_config_cmd.inst.funct := CONFIG_CMD
val pre_pool_config_cmd_rs1 = Wire(new ConfigMvoutRs1)
pre_pool_config_cmd_rs1 := DontCare
pre_pool_config_cmd_rs1.ocols := ocols
pre_pool_config_cmd_rs1.orows := orows
pre_pool_config_cmd_rs1.pocols := pocols
pre_pool_config_cmd_rs1.porows := porows
pre_pool_config_cmd_rs1.pool_out_dim := pool_out_col_dim
pre_pool_config_cmd_rs1.lpad := plpad
pre_pool_config_cmd_rs1.upad := pupad
pre_pool_config_cmd_rs1.pool_size := pool_size
pre_pool_config_cmd_rs1.pool_stride := pool_stride
pre_pool_config_cmd_rs1.activation := req.activation
pre_pool_config_cmd_rs1.cmd_type := CONFIG_STORE
pre_pool_config_cmd.rs1 := pre_pool_config_cmd_rs1.asUInt
val pre_pool_config_cmd_rs2 = Wire(config_mvout_rs2_t.cloneType)
pre_pool_config_cmd_rs2 := DontCare
pre_pool_config_cmd_rs2.acc_scale := ACC_SCALE_NO_CHANGE
pre_pool_config_cmd_rs2.stride := out_stride * (input_w / 8).U
pre_pool_config_cmd.rs2 := pre_pool_config_cmd_rs2.asUInt
val post_pool_config_cmd = Wire(new RoCCCommand)
post_pool_config_cmd := DontCare
post_pool_config_cmd.inst.funct := CONFIG_CMD
val post_pool_config_cmd_rs1 = Wire(new ConfigMvoutRs1)
post_pool_config_cmd_rs1 := DontCare
post_pool_config_cmd_rs1.activation := req.activation
post_pool_config_cmd_rs1.cmd_type := CONFIG_STORE
post_pool_config_cmd.rs1 := post_pool_config_cmd_rs1.asUInt
val post_pool_config_cmd_rs2 = Wire(config_mvout_rs2_t.cloneType)
post_pool_config_cmd_rs2 := DontCare
post_pool_config_cmd_rs2.acc_scale := ACC_SCALE_NO_CHANGE
post_pool_config_cmd_rs2.stride := out_stride * (input_w / 8).U
post_pool_config_cmd.rs2 := post_pool_config_cmd_rs2.asUInt
val pool_cmd = Wire(new RoCCCommand)
pool_cmd := DontCare
pool_cmd.inst.funct := STORE_CMD
pool_cmd.rs1 := 0.U//pool_dram_addr
pool_cmd.rs2 := 0.U//(channels << 32.U) | pool_spad_addr
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !skip && io.ex_completed
command_p.io.in.bits.cmd := MuxLookup(state.asUInt, mvout_cmd)(Seq(
pre_pool_config.asUInt -> pre_pool_config_cmd,
pool.asUInt -> pool_cmd,
post_pool_config.asUInt -> post_pool_config_cmd)
)
command_p.io.in.bits.is_pool := state === pool
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.pool_spad_addr := pool_spad_addr
command_p.io.in.bits.pool_dram_addr := pool_dram_addr
command_p.io.in.bits.channels := channels
command_p.io.in.bits.I := I
command_p.io.in.bits.J := J
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === STORE_CMD) {
val o = command_p.io.out.bits
when (o.is_pool) {
val pool_mvout_cmd_rs2 = Wire(mvout_rs2_t.cloneType)
pool_mvout_cmd_rs2 := DontCare
pool_mvout_cmd_rs2.num_cols := o.channels
pool_mvout_cmd_rs2.local_addr := cast_to_acc_addr(pool_mvout_cmd_rs2.local_addr, o.pool_spad_addr, accumulate = false.B, read_full = false.B)
io.cmd.bits.rs1 := o.pool_dram_addr
io.cmd.bits.rs2 := pool_mvout_cmd_rs2.asUInt
} .otherwise {
val mvout_cmd_rs2 = Wire(mvout_rs2_t.cloneType)
mvout_cmd_rs2 := DontCare
mvout_cmd_rs2.num_rows := o.I.asUInt
mvout_cmd_rs2.num_cols := o.J.asUInt
mvout_cmd_rs2.local_addr := cast_to_acc_addr(mvout_cmd_rs2.local_addr, o.spad_addr, accumulate = false.B, read_full = false.B)
io.cmd.bits.rs1 := o.dram_addr
io.cmd.bits.rs2 := mvout_cmd_rs2.asUInt
}
}
// Sending outputs
when (skip) {
state := idle
}.elsewhen(command_p.io.in.fire) {
when (req.no_pool) {
val next_och = floorAdd(och, block_size.U, ochs)
val next_ocol = floorAdd(ocol, block_size.U, ocols, next_och === 0.U)
val next_orow = floorAdd(orow, 1.U, orows, next_ocol === 0.U && next_och === 0.U)
val next_b = floorAdd(b, 1.U, batches, next_orow === 0.U && next_ocol === 0.U && next_och === 0.U)
och := next_och
ocol := next_ocol
orow := next_orow
b := next_b
state := Mux(next_b === 0.U && next_orow === 0.U && next_ocol === 0.U && next_och === 0.U,
idle, st)
}.elsewhen(state === pre_pool_config) {
state := pool
}.elsewhen(state === post_pool_config) {
state := idle
}.otherwise {
val next_och = floorAdd(och, block_size.U, ochs)
val next_b = floorAdd(b, 1.U, batches, next_och === 0.U)
och := next_och
b := next_b
state := Mux(next_b === 0.U && next_och === 0.U,
post_pool_config, pool)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := Mux(io.req.bits.no_pool, st, pre_pool_config)
b := 0.U
orow := 0.U
ocol := 0.U
och := 0.U
}
}
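// LoopConvState is the per-loop bookkeeping record for the two concurrently tracked conv loops: the
// raw bounds and DRAM addresses captured from the LOOP_CONV_WS_CONFIG_* commands, the
// started/completed flags used for double buffering, and derived_params(), which computes padded and
// unpadded input extents plus the scratchpad strides shared by all five sub-FSMs.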
class LoopConvState(val block_size: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val coreMaxAddrBits: Int, val max_addr: Int, val max_acc_addr: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val bias_dram_addr = UInt(coreMaxAddrBits.W)
val weights_dram_addr = UInt(coreMaxAddrBits.W)
val input_dram_addr = UInt(coreMaxAddrBits.W)
val output_dram_addr = UInt(coreMaxAddrBits.W)
val no_bias = Bool()
val wrot180 = Bool()
val no_pool = Bool()
val downsample = Bool()
val input_dilated = Bool()
val activation = UInt(2.W) // TODO magic number
val trans_output_1203 = Bool()
val trans_weight_1203 = Bool()
val trans_weight_0132 = Bool()
val trans_input_3120 = Bool()
val dw = Bool()
val max_pixels_per_row = UInt(small_iterator_bitwidth.W)
val a_ex_spad_id = UInt(2.W)
val b_ex_spad_id = UInt(2.W)
val configured = Bool()
val running = Bool()
val ld_bias_started = Bool()
val ld_input_started = Bool()
val ld_weights_started = Bool()
val ex_started = Bool()
val st_started = Bool()
val ld_bias_completed = Bool()
val ld_input_completed = Bool()
val ld_weights_completed = Bool()
val ex_completed = Bool()
val st_completed = Bool()
def all_completed(dummy: Int=0): Bool = ld_bias_completed && ld_input_completed && ld_weights_completed && ex_completed && st_completed
val a_addr_start = UInt(log2Up(max_addr).W)
val b_addr_end = UInt(log2Up(max_addr+1).W)
def derived_params(dummy: Int=0): LoopConvDerivedParams = {
import outer_bounds.{stride, kernel_dilation}
import inner_bounds.{batches, pochs, orows, ocols, krows, kcols, upad, dpad, lpad, rpad, kchs}
val result = Wire(new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth))
result.ochs := pochs
val dilated_krows = krows + (kernel_dilation - 1.U)*(krows - 1.U)
val dilated_kcols = kcols + (kernel_dilation - 1.U)*(kcols - 1.U)
val irows_without_dilation = orows * stride +& dilated_krows -& 1.U
val icols_without_dilation = ocols * stride +& dilated_kcols -& 1.U
val irows_unpadded_without_dilation = irows_without_dilation -& upad -& dpad
val icols_unpadded_without_dilation = icols_without_dilation -& lpad -& rpad
def undilated(x: UInt): UInt = (x +& input_dilated) >> input_dilated
val irows_unpadded = undilated(irows_unpadded_without_dilation)
val icols_unpadded = undilated(icols_unpadded_without_dilation)
result.irows := Mux(input_dilated, irows_unpadded +& undilated(upad) +& undilated(dpad), irows_without_dilation)
result.icols := Mux(input_dilated, icols_unpadded +& undilated(lpad) +& undilated(rpad), icols_without_dilation)
result.irows_unpadded := irows_unpadded
result.icols_unpadded := icols_unpadded
result.ichs := kchs
result.out_channels_per_bank := result.ochs / block_size.U(result.ochs.getWidth.W) +& (result.ochs % block_size.U =/= 0.U)
result.in_channels_per_bank := result.ichs / block_size.U(result.ochs.getWidth.W) +& (result.ichs % block_size.U =/= 0.U)
result.bias_spad_stride := batches * orows * ocols
result.input_spad_stride := Mux(trans_input_3120,
result.ichs * (result.irows >> downsample) * (result.icols >> downsample),
batches * (result.irows >> downsample) * (result.icols >> downsample))
result.weight_spad_stride := Mux(trans_weight_0132, krows * kcols * pochs, krows * kcols * kchs)
// result.ex_overwrite := bias_dram_addr =/= 0.U && no_bias
result
}
def reset(): Unit = {
configured := false.B
running := false.B
ld_bias_started := false.B
ld_input_started := false.B
ld_weights_started := false.B
ex_started := false.B
st_started := false.B
ld_bias_completed := false.B
ld_input_completed := false.B
ld_weights_completed := false.B
ex_completed := false.B
st_completed := false.B
}
}
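// LoopConv is the top-level unroller for the LOOP_CONV_WS instruction. It queues incoming
// GemminiCmds, captures LOOP_CONV_WS_CONFIG_1..6 / LOOP_CONV_WS into one of two LoopConvState slots,
// and lets the five sub-FSMs (bias/weight/input loads, execute, store) emit fine-grained RoCC
// commands through a fixed-priority arbiter (stores first). Reservation-station utilization counters
// throttle each command class, and non-loop commands pass straight through while no loop is
// configured.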
class LoopConv (block_size: Int, coreMaxAddrBits: Int, reservation_station_size: Int, max_lds: Int, max_exs: Int, max_sts: Int,
max_addr: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, dma_max_bytes: Int,
config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2, config_mvout_rs2_t: ConfigMvoutRs2, mvout_rs2_t: MvoutRs2,
config_ex_rs1_t: ConfigExRs1, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs,
has_training_convs: Boolean, has_max_pool: Boolean, has_first_layer_optimizations: Boolean,
has_dw_convs: Boolean)
(implicit p: Parameters) extends Module {
val large_iterator_bitwidth = 16
val small_iterator_bitwidth = 16 // 8
val tiny_iterator_bitwidth = 16 // 4
val max_block_len = (dma_max_bytes / (block_size * (input_w / 8))) max 1
val max_block_len_acc = (dma_max_bytes / (block_size * (acc_w / 8))) max 1
val io = IO(new Bundle {
val in = Flipped(Decoupled(new GemminiCmd(reservation_station_size)))
val out = Decoupled(new GemminiCmd(reservation_station_size))
val ld_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val st_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val ex_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val busy = Output(Bool())
})
// Create states
val concurrent_loops = 2
val loops = Reg(Vec(concurrent_loops, new LoopConvState(block_size, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, coreMaxAddrBits, max_addr, max_acc_addr)))
val head_loop_id = RegInit(0.U(log2Up(concurrent_loops).W))
val tail_loop_id = (~head_loop_id).asUInt // This is the loop that we always try to configure if available
val head_loop = loops(head_loop_id)
val tail_loop = loops(tail_loop_id)
val loop_configured = loops.map(_.configured).reduce(_ || _)
val loop_being_configured_id = Mux(head_loop.configured, tail_loop_id, head_loop_id)
val loop_being_configured = loops(loop_being_configured_id)
// Create inner modules
val latency = 2
val ld_bias = Module(new LoopConvLdBias(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, acc_w, max_block_len_acc, concurrent_loops, latency, config_mvin_rs1_t, mvin_rs2_t))
val ld_input = Module(new LoopConvLdInput(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, input_w, max_block_len, concurrent_loops, latency, config_mvin_rs1_t, mvin_rs2_t))
val ld_weights = Module(new LoopConvLdWeight(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, input_w, max_block_len, concurrent_loops, latency, config_mvin_rs1_t, mvin_rs2_t))
val ex = Module(new LoopConvExecute(block_size, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops, latency, config_ex_rs1_t, preload_rs1_t, preload_rs2_t, compute_rs1_t, compute_rs2_t))
val st = Module(new LoopConvSt(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, input_w, concurrent_loops, latency, config_mvout_rs2_t, mvout_rs2_t))
// Create command queue
val cmd = Queue(io.in)
io.busy := cmd.valid || loop_configured
// Create arbiter
val arb = Module(new Arbiter(new RoCCCommand, 5))
arb.io.in(0) <> st.io.cmd
arb.io.in(1) <> ex.io.cmd
arb.io.in(2) <> ld_bias.io.cmd
arb.io.in(3) <> ld_weights.io.cmd
arb.io.in(4) <> ld_input.io.cmd
val unrolled_cmd = arb.io.out
// Create reservation station utilization counters
val ld_utilization = RegInit(0.U(log2Up(max_lds+1).W))
val st_utilization = RegInit(0.U(log2Up(max_sts+1).W))
val ex_utilization = RegInit(0.U(log2Up(max_exs+1).W))
ld_utilization := ld_utilization +& (ld_bias.io.cmd.fire || ld_weights.io.cmd.fire || ld_input.io.cmd.fire) -& io.ld_completed
st_utilization := st_utilization +& st.io.cmd.fire -& io.st_completed
ex_utilization := ex_utilization +& ex.io.cmd.fire -& io.ex_completed
assert(ld_utilization >= io.ld_completed, "ld utilization underflow")
assert(st_utilization >= io.st_completed, "st utilization underflow")
assert(ex_utilization >= io.ex_completed, "ex utilization underflow")
// Wire up unrolled command output
val is_loop_run_cmd = cmd.bits.cmd.inst.funct === LOOP_CONV_WS
val is_loop_config_cmd = cmd.bits.cmd.inst.funct >= LOOP_CONV_WS_CONFIG_1 && cmd.bits.cmd.inst.funct <= LOOP_CONV_WS_CONFIG_6
val is_loop_cmd = is_loop_run_cmd || is_loop_config_cmd
io.out.bits.cmd := Mux(loop_configured, unrolled_cmd.bits, cmd.bits.cmd)
io.out.bits.cmd.status := cmd.bits.cmd.status // TODO This is not guaranteed to be the correct fix! We must fix this
io.out.bits.rob_id := DontCare
io.out.bits.from_matmul_fsm := Mux(loop_configured, false.B, cmd.bits.from_matmul_fsm)
io.out.bits.from_conv_fsm := Mux(loop_configured, true.B, cmd.bits.from_conv_fsm)
io.out.valid := Mux(loop_configured, unrolled_cmd.valid, cmd.valid && !is_loop_config_cmd && !is_loop_run_cmd)
cmd.ready := Mux(is_loop_cmd, !loop_being_configured.configured, !loop_configured && io.out.ready)
arb.io.out.ready := io.out.ready
// Wire up waiting-for-loads signals
val ex_is_waiting_for_loads = loops(ex.io.loop_id).ex_started && !loops(ex.io.loop_id).ex_completed &&
!(loops(ex.io.loop_id).ld_input_completed && loops(ex.io.loop_id).ld_weights_completed &&
loops(ex.io.loop_id).ld_bias_completed)
ld_bias.io.wait_for_prev_loop := ex_is_waiting_for_loads && ld_bias.io.loop_id =/= ex.io.loop_id
ld_weights.io.wait_for_prev_loop := ex_is_waiting_for_loads && ld_weights.io.loop_id =/= ex.io.loop_id
ld_input.io.wait_for_prev_loop := ex_is_waiting_for_loads && ld_input.io.loop_id =/= ex.io.loop_id
// Wire up overloaded signals
ld_bias.io.rob_overloaded := ld_utilization >= max_lds.U
ld_input.io.rob_overloaded := ld_utilization >= max_lds.U
ld_weights.io.rob_overloaded := ld_utilization >= max_lds.U
ex.io.rob_overloaded := ex_utilization >= max_exs.U
st.io.rob_overloaded := st_utilization >= max_sts.U
// Wire up iterator inputs
ex.io.lda_completed := (ld_input.io.loop_id =/= ex.io.loop_id) || ld_input.io.idle
ex.io.ldb_completed := (ld_weights.io.loop_id =/= ex.io.loop_id) || ld_weights.io.idle
ex.io.ldd_completed := (ld_bias.io.loop_id =/= ex.io.loop_id) || ld_bias.io.idle
st.io.ex_completed := (ex.io.loop_id =/= st.io.loop_id) || ex.io.idle
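// A stage only has to wait for another stage while both are working on the *same* loop: the execute
// FSM treats a loader as finished either when that loader is idle or when its loop_id differs, and
// the store FSM waits on the execute FSM under the same condition.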
// Create config registers
when(cmd.valid && is_loop_cmd && !loop_being_configured.configured) {
switch (cmd.bits.cmd.inst.funct) {
is (LOOP_CONV_WS_CONFIG_1) {
loop_being_configured.outer_bounds.out_channels := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.outer_bounds.in_channels := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.outer_bounds.in_row_dim := cmd.bits.cmd.rs1(31, 16)
loop_being_configured.outer_bounds.batch_size := cmd.bits.cmd.rs1(15, 0)
loop_being_configured.outer_bounds.padding := cmd.bits.cmd.rs2(63, 56)
loop_being_configured.outer_bounds.stride := cmd.bits.cmd.rs2(55, 48)
loop_being_configured.outer_bounds.out_col_dim := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.outer_bounds.pool_out_row_dim := cmd.bits.cmd.rs2(31, 16)
loop_being_configured.outer_bounds.out_row_dim := cmd.bits.cmd.rs2(15, 0)
}
is (LOOP_CONV_WS_CONFIG_2) {
loop_being_configured.outer_bounds.kernel_dim := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.outer_bounds.pool_out_col_dim := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.outer_bounds.pool_size := (if (!has_max_pool) 1.U else cmd.bits.cmd.rs1(31, 16))
loop_being_configured.outer_bounds.pool_stride := (if (!has_max_pool) 1.U else cmd.bits.cmd.rs1(15, 8))
loop_being_configured.outer_bounds.pool_padding := (if (!has_max_pool) 0.U else cmd.bits.cmd.rs1(7, 0))
loop_being_configured.inner_bounds.batches := cmd.bits.cmd.rs2(63, 48)
loop_being_configured.inner_bounds.porows := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.inner_bounds.pocols := cmd.bits.cmd.rs2(31, 16)
loop_being_configured.inner_bounds.pochs := cmd.bits.cmd.rs2(15, 0)
}
is (LOOP_CONV_WS_CONFIG_3) {
loop_being_configured.inner_bounds.krows := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.inner_bounds.kcols := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.inner_bounds.kchs := cmd.bits.cmd.rs1(31, 16)
loop_being_configured.inner_bounds.lpad := cmd.bits.cmd.rs1(15, 0)
loop_being_configured.inner_bounds.rpad := cmd.bits.cmd.rs2(63, 48)
loop_being_configured.inner_bounds.upad := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.inner_bounds.dpad := cmd.bits.cmd.rs2(31, 24)
loop_being_configured.inner_bounds.plpad := cmd.bits.cmd.rs2(23, 16)
loop_being_configured.outer_bounds.in_col_dim := cmd.bits.cmd.rs2(15, 0)
}
is (LOOP_CONV_WS_CONFIG_4) {
loop_being_configured.inner_bounds.orows := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.inner_bounds.prad := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.inner_bounds.pupad := cmd.bits.cmd.rs1(31, 21)
loop_being_configured.inner_bounds.pdpad := cmd.bits.cmd.rs1(20, 10)
loop_being_configured.outer_bounds.kernel_dilation := cmd.bits.cmd.rs1(9, 0)
loop_being_configured.inner_bounds.ocols := cmd.bits.cmd.rs2(15, 0)
loop_being_configured.outer_bounds.in_stride := cmd.bits.cmd.rs2(63, 48)
loop_being_configured.outer_bounds.weight_stride := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.outer_bounds.out_stride := cmd.bits.cmd.rs2(31, 16)
}
is (LOOP_CONV_WS_CONFIG_5) {
loop_being_configured.weights_dram_addr := cmd.bits.cmd.rs1
loop_being_configured.output_dram_addr := cmd.bits.cmd.rs2
}
is (LOOP_CONV_WS_CONFIG_6) {
loop_being_configured.bias_dram_addr := cmd.bits.cmd.rs1
loop_being_configured.input_dram_addr := cmd.bits.cmd.rs2
}
is (LOOP_CONV_WS) {
loop_being_configured.no_bias := cmd.bits.cmd.rs1(0)
// TODO We added a default value for max_pixels_per_row just to maintain backwards compatibility. We should deprecate and remove it later.
val config_max_pixels_per_row = cmd.bits.cmd.rs1(15, 8)
loop_being_configured.max_pixels_per_row := Mux(
!has_first_layer_optimizations.B || config_max_pixels_per_row === 0.U,
1.U, config_max_pixels_per_row)
loop_being_configured.a_ex_spad_id := cmd.bits.cmd.rs1(19, 18)
loop_being_configured.b_ex_spad_id := cmd.bits.cmd.rs1(17, 16)
loop_being_configured.wrot180 := has_training_convs.B && cmd.bits.cmd.rs1(1)
loop_being_configured.input_dilated := has_training_convs.B && cmd.bits.cmd.rs2(2)
loop_being_configured.trans_output_1203 := has_training_convs.B && cmd.bits.cmd.rs1(2)
loop_being_configured.trans_weight_1203 := has_training_convs.B && cmd.bits.cmd.rs1(3)
loop_being_configured.trans_weight_0132 := has_training_convs.B && cmd.bits.cmd.rs1(4)
loop_being_configured.trans_input_3120 := has_training_convs.B && cmd.bits.cmd.rs1(5)
loop_being_configured.dw := has_dw_convs.B && cmd.bits.cmd.rs1(6)
loop_being_configured.no_pool := !has_max_pool.B || cmd.bits.cmd.rs2(0)
loop_being_configured.activation := cmd.bits.cmd.rs2(4,3)
loop_being_configured.downsample := cmd.bits.cmd.rs2(1)
loop_being_configured.configured := true.B
// assert(!loop_being_configured.input_dilated || loop_being_configured.outer_bounds.stride === 1.U)
// assert(!loop_being_configured.downsample || (loop_being_configured.outer_bounds.kernel_dim === 1.U && loop_being_configured.outer_bounds.stride === 2.U)) // TODO add the rest of the conditions that must be true for "downsample" to be enabled
}
}
}
// Wire up request signals
val ld_bias_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val ex_c_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val st_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val loop_requesting_ld_bias_id = Mux(head_loop.ld_bias_started, tail_loop_id, head_loop_id)
val loop_requesting_ld_bias = loops(loop_requesting_ld_bias_id)
ld_bias.io.req.bits.outer_bounds := loop_requesting_ld_bias.outer_bounds
ld_bias.io.req.bits.inner_bounds := loop_requesting_ld_bias.inner_bounds
ld_bias.io.req.bits.derived_params := loop_requesting_ld_bias.derived_params()
ld_bias.io.req.bits.addr_start := ld_bias_addr_start
ld_bias.io.req.bits.dram_addr := loop_requesting_ld_bias.bias_dram_addr
ld_bias.io.req.bits.no_bias := loop_requesting_ld_bias.no_bias
ld_bias.io.req.bits.loop_id := loop_requesting_ld_bias_id
ld_bias.io.req.valid := !loop_requesting_ld_bias.ld_bias_started && loop_requesting_ld_bias.configured
when (ld_bias.io.req.fire) {
loop_requesting_ld_bias.running := true.B
loop_requesting_ld_bias.ld_bias_started := true.B
// when (loop_requesting_ld_bias.bias_dram_addr =/= 0.U) {
when (loop_requesting_ld_bias.output_dram_addr =/= 0.U) {
ld_bias_addr_start := floorAdd(ld_bias_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
val loop_requesting_ld_input_id = Mux(head_loop.ld_input_started, tail_loop_id, head_loop_id)
val loop_requesting_ld_input = loops(loop_requesting_ld_input_id)
ld_input.io.req.bits.outer_bounds := loop_requesting_ld_input.outer_bounds
ld_input.io.req.bits.inner_bounds := loop_requesting_ld_input.inner_bounds
ld_input.io.req.bits.derived_params := loop_requesting_ld_input.derived_params()
ld_input.io.req.bits.addr_start := Mux(loop_requesting_ld_input.a_ex_spad_id === 0.U, loop_requesting_ld_input.a_addr_start, (loop_requesting_ld_input.a_ex_spad_id - 1.U) * (max_addr / concurrent_loops).U)
ld_input.io.req.bits.dram_addr := loop_requesting_ld_input.input_dram_addr
ld_input.io.req.bits.downsample := loop_requesting_ld_input.downsample
ld_input.io.req.bits.max_pixels_per_row := loop_requesting_ld_input.max_pixels_per_row
ld_input.io.req.bits.input_dilated := loop_requesting_ld_input.input_dilated
ld_input.io.req.bits.trans_input_3120 := loop_requesting_ld_input.trans_input_3120
ld_input.io.req.bits.loop_id := loop_requesting_ld_input_id
ld_input.io.req.valid := !loop_requesting_ld_input.ld_input_started && loop_requesting_ld_input.configured
when (ld_input.io.req.fire) {
loop_requesting_ld_input.running := true.B
loop_requesting_ld_input.ld_input_started := true.B
}
val loop_requesting_ld_weights_id = Mux(head_loop.ld_weights_started, tail_loop_id, head_loop_id)
val loop_requesting_ld_weights = loops(loop_requesting_ld_weights_id)
ld_weights.io.req.bits.outer_bounds := loop_requesting_ld_weights.outer_bounds
ld_weights.io.req.bits.inner_bounds := loop_requesting_ld_weights.inner_bounds
ld_weights.io.req.bits.derived_params := loop_requesting_ld_weights.derived_params()
ld_weights.io.req.bits.addr_end := Mux(loop_requesting_ld_weights.b_ex_spad_id === 0.U, loop_requesting_ld_weights.b_addr_end, (loop_requesting_ld_weights.b_ex_spad_id) * (max_addr / concurrent_loops).U)
ld_weights.io.req.bits.dram_addr := loop_requesting_ld_weights.weights_dram_addr
ld_weights.io.req.bits.trans_weight_1203 := loop_requesting_ld_weights.trans_weight_1203
ld_weights.io.req.bits.trans_weight_0132 := loop_requesting_ld_weights.trans_weight_0132
ld_weights.io.req.bits.dw := loop_requesting_ld_weights.dw
ld_weights.io.req.bits.loop_id := loop_requesting_ld_weights_id
ld_weights.io.req.valid := !loop_requesting_ld_weights.ld_weights_started && loop_requesting_ld_weights.configured
when (ld_weights.io.req.fire) {
loop_requesting_ld_weights.running := true.B
loop_requesting_ld_weights.ld_weights_started := true.B
}
val loop_requesting_ex_id = Mux(head_loop.ex_started, tail_loop_id, head_loop_id)
val loop_requesting_ex = loops(loop_requesting_ex_id)
ex.io.req.bits.outer_bounds := loop_requesting_ex.outer_bounds
ex.io.req.bits.inner_bounds := loop_requesting_ex.inner_bounds
ex.io.req.bits.derived_params := loop_requesting_ex.derived_params()
ex.io.req.bits.a_addr_start := Mux(loop_requesting_ex.a_ex_spad_id === 0.U, loop_requesting_ex.a_addr_start, (loop_requesting_ex.a_ex_spad_id - 1.U) * (max_addr / concurrent_loops).U)
ex.io.req.bits.b_addr_end := Mux(loop_requesting_ex.b_ex_spad_id === 0.U, loop_requesting_ex.b_addr_end, (loop_requesting_ex.b_ex_spad_id) * (max_addr / concurrent_loops).U)
ex.io.req.bits.c_addr_start := ex_c_addr_start
ex.io.req.bits.wrot180 := loop_requesting_ex.wrot180
ex.io.req.bits.downsample := loop_requesting_ex.downsample
ex.io.req.bits.max_pixels_per_row := loop_requesting_ex.max_pixels_per_row
ex.io.req.bits.input_dilated := loop_requesting_ex.input_dilated
ex.io.req.bits.trans_weight_0132 := loop_requesting_ex.trans_weight_0132
ex.io.req.bits.trans_input_3120 := loop_requesting_ex.trans_input_3120
ex.io.req.bits.loop_id := loop_requesting_ex_id
ex.io.req.valid := !loop_requesting_ex.ex_started && loop_requesting_ex.ld_bias_started &&
loop_requesting_ex.ld_input_started && loop_requesting_ex.ld_weights_started && loop_requesting_ex.configured
when (ex.io.req.fire) {
loop_requesting_ex.running := true.B
loop_requesting_ex.ex_started := true.B
when (loop_requesting_ex.output_dram_addr =/= 0.U) {
ex_c_addr_start := floorAdd(ex_c_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
val loop_requesting_st_id = Mux(head_loop.st_started, tail_loop_id, head_loop_id)
val loop_requesting_st = loops(loop_requesting_st_id)
st.io.req.bits.outer_bounds := loop_requesting_st.outer_bounds
st.io.req.bits.inner_bounds := loop_requesting_st.inner_bounds
st.io.req.bits.derived_params := loop_requesting_st.derived_params()
st.io.req.bits.addr_start := st_addr_start
st.io.req.bits.dram_addr := loop_requesting_st.output_dram_addr
st.io.req.bits.no_pool := loop_requesting_st.no_pool
st.io.req.bits.activation := loop_requesting_st.activation
st.io.req.bits.trans_output_1203 := loop_requesting_st.trans_output_1203
st.io.req.bits.loop_id := loop_requesting_st_id
st.io.req.valid := !loop_requesting_st.st_started && loop_requesting_st.ex_started && loop_requesting_st.configured
when (st.io.req.fire) {
loop_requesting_st.running := true.B
loop_requesting_st.st_started := true.B
when (loop_requesting_st.output_dram_addr =/= 0.U) {
st_addr_start := floorAdd(st_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
// Handle completed signals
when (ld_bias.io.idle && loops(ld_bias.io.loop_id).running && loops(ld_bias.io.loop_id).ld_bias_started) {
loops(ld_bias.io.loop_id).ld_bias_completed := true.B
}
when (ld_input.io.idle && loops(ld_input.io.loop_id).running && loops(ld_input.io.loop_id).ld_input_started) {
loops(ld_input.io.loop_id).ld_input_completed := true.B
}
when (ld_weights.io.idle && loops(ld_weights.io.loop_id).running && loops(ld_weights.io.loop_id).ld_weights_started) {
loops(ld_weights.io.loop_id).ld_weights_completed := true.B
}
when (ex.io.idle && loops(ex.io.loop_id).running && loops(ex.io.loop_id).ex_started) {
loops(ex.io.loop_id).ex_completed := true.B
}
when (st.io.idle && loops(st.io.loop_id).running && loops(st.io.loop_id).st_started) {
loops(st.io.loop_id).st_completed := true.B
}
when (head_loop.running && head_loop.all_completed()) {
head_loop.reset()
head_loop_id := ~head_loop_id
}
// Resets
when (reset.asBool) {
loops.zipWithIndex.foreach { case (l, i) =>
l.reset()
l.a_addr_start := (i * (max_addr / concurrent_loops)).U
l.b_addr_end := ((i+1) * (max_addr / concurrent_loops)).U
}
}
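// On reset the scratchpad is split between the two loop slots: slot i's input ("A") region starts at
// i * (max_addr / 2) and its weight ("B") region ends at (i + 1) * (max_addr / 2), so inputs grow
// upward and weights grow downward within each half.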
}
object LoopConv {
def apply(in: DecoupledIO[GemminiCmd], ld_completed: UInt, st_completed: UInt, ex_completed: UInt,
block_size: Int, coreMaxAddrBits: Int, rob_size: Int, max_lds: Int, max_exs: Int, max_sts: Int,
max_addr: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, dma_max_bytes: Int,
config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2, config_mvout_rs2_t: ConfigMvoutRs2,
mvout_rs2_t: MvoutRs2, config_ex_rs1_t: ConfigExRs1, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs, has_training_convs: Boolean, has_max_pool: Boolean,
has_first_layer_optimizations: Boolean, has_dw_convs: Boolean)
(implicit p: Parameters): (DecoupledIO[GemminiCmd], Bool) = {
val mod = Module(new LoopConv(block_size, coreMaxAddrBits, rob_size, max_lds, max_exs, max_sts,
max_addr, max_acc_addr, input_w, acc_w, dma_max_bytes,
config_mvin_rs1_t, mvin_rs2_t, config_mvout_rs2_t, mvout_rs2_t, config_ex_rs1_t, preload_rs1_t, preload_rs2_t,
compute_rs1_t, compute_rs2_t, has_training_convs, has_max_pool, has_first_layer_optimizations, has_dw_convs))
mod.io.in <> in
mod.io.ld_completed := ld_completed
mod.io.st_completed := st_completed
mod.io.ex_completed := ex_completed
(mod.io.out, mod.io.busy)
}
def castDramOffset(dram_offset: UInt): UInt = {
// Cast dram offsets to 32 bits max
dram_offset & "hFFFFFFFF".U
}
}
File LocalAddr.scala:
package gemmini
import chisel3._
import chisel3.util._
class LocalAddr(sp_banks: Int, sp_bank_entries: Int, acc_banks: Int, acc_bank_entries: Int) extends Bundle {
private val localAddrBits = 32 // TODO magic number
private val spAddrBits = log2Ceil(sp_banks * sp_bank_entries)
private val accAddrBits = log2Ceil(acc_banks * acc_bank_entries)
private val maxAddrBits = spAddrBits max accAddrBits
private val spBankBits = log2Up(sp_banks)
private val spBankRowBits = log2Up(sp_bank_entries)
private val accBankBits = log2Up(acc_banks)
val accBankRowBits = log2Up(acc_bank_entries)
val spRows = sp_banks * sp_bank_entries
val is_acc_addr = Bool()
val accumulate = Bool()
val read_full_acc_row = Bool()
val norm_cmd = NormCmd()
private val metadata_w = is_acc_addr.getWidth + accumulate.getWidth + read_full_acc_row.getWidth + norm_cmd.getWidth
assert(maxAddrBits + metadata_w < 32)
val garbage = UInt(((localAddrBits - maxAddrBits - metadata_w - 1) max 0).W)
val garbage_bit = if (localAddrBits - maxAddrBits >= metadata_w + 1) UInt(1.W) else UInt(0.W)
val data = UInt(maxAddrBits.W)
def sp_bank(dummy: Int = 0) = if (spAddrBits == spBankRowBits) 0.U else data(spAddrBits - 1, spBankRowBits)
def sp_row(dummy: Int = 0) = data(spBankRowBits - 1, 0)
def acc_bank(dummy: Int = 0) = if (accAddrBits == accBankRowBits) 0.U else data(accAddrBits - 1, accBankRowBits)
def acc_row(dummy: Int = 0) = data(accBankRowBits - 1, 0)
def full_sp_addr(dummy: Int = 0) = data(spAddrBits - 1, 0)
def full_acc_addr(dummy: Int = 0) = data(accAddrBits - 1, 0)
def is_same_address(other: LocalAddr): Bool = is_acc_addr === other.is_acc_addr && data === other.data
def is_same_address(other: UInt): Bool = is_same_address(other.asTypeOf(this))
def is_garbage(dummy: Int = 0) = is_acc_addr && accumulate && read_full_acc_row && data.andR &&
(if (garbage_bit.getWidth > 0) garbage_bit.asBool else true.B)
def +(other: UInt) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val result = WireInit(this)
result.data := data + other
result
}
def <=(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() <= other.full_acc_addr(), full_sp_addr() <= other.full_sp_addr())
def <(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() < other.full_acc_addr(), full_sp_addr() < other.full_sp_addr())
def >(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() > other.full_acc_addr(), full_sp_addr() > other.full_sp_addr())
def add_with_overflow(other: UInt): Tuple2[LocalAddr, Bool] = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val sum = data +& other
val overflow = Mux(is_acc_addr, sum(accAddrBits), sum(spAddrBits))
val result = WireInit(this)
result.data := sum(maxAddrBits - 1, 0)
(result, overflow)
}
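  // Worked example (illustrative, assuming spAddrBits = maxAddrBits = 14): adding 1 to a
  // non-accumulator address whose data is 0x3FFF gives sum = 0x4000, so the returned data
  // wraps to 0 and the overflow flag (bit spAddrBits of the widened sum) is set.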
  // This function can only be used with non-accumulator addresses. It returns both the new address and an underflow flag
def floorSub(other: UInt, floor: UInt): (LocalAddr, Bool) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val underflow = data < (floor +& other)
val result = WireInit(this)
result.data := Mux(underflow, floor, data - other)
(result, underflow)
}
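  // Worked example (illustrative): with data = 4, floorSub(other = 5.U, floor = 2.U) detects
  // that floor + other = 7 exceeds the current address, so it returns the floor (data = 2)
  // together with underflow = true.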
def make_this_garbage(dummy: Int = 0): Unit = {
is_acc_addr := true.B
accumulate := true.B
read_full_acc_row := true.B
garbage_bit := 1.U
data := ~(0.U(maxAddrBits.W))
}
}
object LocalAddr {
def cast_to_local_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
    // This convenience function is basically the same as calling "asTypeOf(local_addr_t)". However, it also
    // sets the unnecessary garbage bits to 0, which may help reduce multiplier/adder bitwidths
val result = WireInit(t.asTypeOf(local_addr_t))
if (result.garbage_bit.getWidth > 0) result.garbage := 0.U
result
}
def cast_to_sp_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := false.B
result.accumulate := false.B
result.read_full_acc_row := false.B
// assert(!result.garbage_bit, "cast_to_sp_addr doesn't work on garbage addresses")
result
}
def cast_to_acc_addr[T <: Data](local_addr_t: LocalAddr, t: T, accumulate: Bool, read_full: Bool): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := true.B
result.accumulate := accumulate
result.read_full_acc_row := read_full
// assert(!result.garbage_bit, "cast_to_acc_addr doesn't work on garbage addresses")
result
}
def garbage_addr(local_addr_t: LocalAddr): LocalAddr = {
val result = Wire(chiselTypeOf(local_addr_t))
result := DontCare
result.make_this_garbage()
result
}
}
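// --- Usage sketch (illustrative; not part of the original gemmini sources) ---
// A minimal example of how the casting helpers above are typically combined. The object name
// and its arguments are hypothetical, and the helper must be called from inside a Module
// (it instantiates wires).
object LocalAddrUsageSketch {
  def nextAccAddr(local_addr_t: LocalAddr, raw_addr: UInt, do_accumulate: Bool): (LocalAddr, Bool) = {
    // Reinterpret a raw UInt as an accumulator address, then step it by one row,
    // reporting whether the accumulator address space overflowed.
    val acc_addr = LocalAddr.cast_to_acc_addr(local_addr_t, raw_addr, accumulate = do_accumulate, read_full = false.B)
    acc_addr.add_with_overflow(1.U)
  }
}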
File Util.scala:
package gemmini
import chisel3._
import chisel3.util._
object Util {
def wrappingAdd(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
if (max == 0) {
0.U
} else {
assert(n <= max.U, "cannot wrapAdd when n is larger than max")
Mux(u >= max.U - n + 1.U && n =/= 0.U, n - (max.U - u) - 1.U, u + n)
}
}
def wrappingAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
assert(n <= max || max === 0.U, "cannot wrapAdd when n is larger than max, unless max is 0")
/*
Mux(!en, u,
Mux (max === 0.U, 0.U,
Mux(u >= max - n + 1.U && n =/= 0.U, n - (max - u) - 1.U, u + n)))
*/
MuxCase(u + n, Seq(
(!en) -> u,
(max === 0.U) -> 0.U,
(u >= max - n + 1.U && n =/= 0.U) -> (n - (max - u) - 1.U)
))
}
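  // Worked example (illustrative): wrappingAdd(u = 6.U, n = 3.U, max_plus_one = 8.U) wraps past
  // max = 7, giving n - (max - u) - 1 = 1, i.e. (6 + 3) % 8; with en = false it simply returns u.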
def satAdd(u: UInt, v: UInt, max: UInt): UInt = {
Mux(u +& v > max, max, u + v)
}
def floorAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
MuxCase(u + n, Seq(
(!en) -> u,
((u +& n) > max) -> 0.U
))
}
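  // Worked example (illustrative): floorAdd(u = 6.U, n = 3.U, max_plus_one = 8.U) returns 0
  // because 6 + 3 exceeds max = 7, whereas floorAdd(4.U, 3.U, 8.U) returns 7.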
def sFloorAdd(s: SInt, n: UInt, max_plus_one: SInt, min: SInt, en: Bool = true.B): SInt = {
val max = max_plus_one - 1.S
MuxCase(s + n.zext, Seq(
(!en) -> s,
((s +& n.zext) > max) -> min
))
}
def wrappingSub(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
assert(n <= max.U, "cannot wrapSub when n is larger than max")
Mux(u < n, max.U - (n-u) + 1.U, u - n)
}
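  // Worked example (illustrative): wrappingSub(u = 1.U, n = 3.U, max_plus_one = 8) wraps below
  // zero, giving max - (n - u) + 1 = 6, i.e. (1 - 3) mod 8.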
def ceilingDivide(numer: Int, denom: Int): Int = {
if (numer % denom == 0) { numer / denom }
else { numer / denom + 1}
}
def closestLowerPowerOf2(u: UInt): UInt = {
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
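  // Worked example (illustrative): closestLowerPowerOf2(13.U) = 8, since the highest set bit of
  // 0b1101 is bit 3. Note that an input of 0 yields 1 (1 << 0).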
def closestAlignedLowerPowerOf2(u: UInt, addr: UInt, stride: UInt, rowBytes: Int): UInt = {
val lgRowBytes = log2Ceil(rowBytes)
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b && addr(i + lgRowBytes - 1, 0) === 0.U && stride(i + lgRowBytes - 1, 0) === 0.U, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
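  // Worked example (illustrative, rowBytes = 16): for u = 12 (0b1100), addr = 0x80 and
  // stride = 0x40, bit 3 is rejected because stride is not 2^7-byte aligned, but bit 2 passes
  // both alignment checks, so the result is 4.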
// This function will return "next" with a 0-cycle delay when the "enable" signal is high. It's like a queue with
// the "pipe" and "flow" parameters set to "true"
def RegEnableThru[T <: Data](next: T, enable: Bool): T = {
val buf = RegEnable(next, enable)
Mux(enable, next, buf)
}
def RegEnableThru[T <: Data](next: T, init: T, enable: Bool): T = {
val buf = RegEnable(next, init, enable)
Mux(enable, next, buf)
}
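  // Usage sketch (illustrative): RegEnableThru(data_in, fire) forwards data_in combinationally on
  // cycles where fire is high, and otherwise holds the last value captured while fire was high.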
def maxOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 > u2, u1, u2)
}
def maxOf[T <: Data](x: T, y: T)(implicit ev: Arithmetic[T]): T = {
import ev._
Mux(x > y, x, y)
}
def minOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 < u2, u1, u2)
}
def accumulateTree[T <: Data](xs: Seq[T])(implicit ev: Arithmetic[T]): T = {
import ev._
assert(xs.nonEmpty, "can't accumulate 0 elements")
if (xs.length == 1) {
xs.head
} else {
val upperRowLen = 1 << log2Ceil(xs.length)
val upperRow = xs.padTo(upperRowLen, xs.head.zero)
val pairs = upperRow.grouped(2)
val lowerRow = pairs.map { case Seq(a, b) => a + b }
accumulateTree(lowerRow.toSeq)
}
}
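  // Worked example (illustrative): accumulateTree(Seq(a, b, c)) pads the input to a power-of-two
  // length with a.zero and reduces pairwise, producing (a + b) + (c + zero) in two adder levels.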
  // An undirected Valid bundle (its fields carry no Input/Output direction, so it can be held in a Reg)
class UDValid[T <: Data](t: T) extends Bundle {
val valid = Bool()
val bits = t.cloneType
def push(b: T): Unit = {
valid := true.B
bits := b
}
def pop(dummy: Int = 0): T = {
valid := false.B
bits
}
}
object UDValid {
def apply[T <: Data](t: T): UDValid[T] = new UDValid(t)
}
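  // Usage sketch (illustrative): a UDValid is typically held in a Reg and driven by push/pop from
  // control logic. The names enq_fire, deq_fire, enq_bits, and io.out below are hypothetical, e.g.
  //   val entry = RegInit(0.U.asTypeOf(UDValid(UInt(8.W))))
  //   when (enq_fire) { entry.push(enq_bits) }
  //   when (deq_fire) { io.out := entry.pop() }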
  // Creates a Reg and its next-state Wire (defaulted to the Reg's current value), and returns both
def regwire(bits: Int) = {
val wire = Wire(UInt(bits.W))
val reg = RegNext(wire)
wire := reg // default wire to read from reg
(reg, wire)
}
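  // Usage sketch (illustrative): the returned wire defaults to the register's current value, so only
  // the update needs to be written explicitly. The name 'inc' below is hypothetical, e.g.
  //   val (count, next_count) = regwire(8)
  //   when (inc) { next_count := count + 1.U } // 'count' takes the new value on the next cycle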
}
| module LoopConvLdWeight( // @[LoopConv.scala:414:7]
input clock, // @[LoopConv.scala:414:7]
input reset, // @[LoopConv.scala:414:7]
output io_req_ready, // @[LoopConv.scala:420:14]
input io_req_valid, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_batch_size, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_in_row_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_in_col_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_in_channels, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_out_channels, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_out_col_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_out_row_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_out_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_in_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_weight_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_pool_out_row_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_pool_out_col_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_padding, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_kernel_dim, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_kernel_dilation, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_pool_size, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_pool_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_outer_bounds_pool_padding, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_batches, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_porows, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_pocols, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_pochs, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_krows, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_kcols, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_kchs, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_lpad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_rpad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_upad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_dpad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_plpad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_prad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_pupad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_pdpad, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_orows, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_inner_bounds_ocols, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_ochs, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_irows, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_icols, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_irows_unpadded, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_icols_unpadded, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_ichs, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_out_channels_per_bank, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_in_channels_per_bank, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_bias_spad_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_input_spad_stride, // @[LoopConv.scala:420:14]
input [15:0] io_req_bits_derived_params_weight_spad_stride, // @[LoopConv.scala:420:14]
input [14:0] io_req_bits_addr_end, // @[LoopConv.scala:420:14]
input [39:0] io_req_bits_dram_addr, // @[LoopConv.scala:420:14]
input io_req_bits_trans_weight_1203, // @[LoopConv.scala:420:14]
input io_req_bits_trans_weight_0132, // @[LoopConv.scala:420:14]
input io_req_bits_dw, // @[LoopConv.scala:420:14]
input io_req_bits_loop_id, // @[LoopConv.scala:420:14]
input io_cmd_ready, // @[LoopConv.scala:420:14]
output io_cmd_valid, // @[LoopConv.scala:420:14]
output [6:0] io_cmd_bits_inst_funct, // @[LoopConv.scala:420:14]
output [4:0] io_cmd_bits_inst_rs2, // @[LoopConv.scala:420:14]
output [4:0] io_cmd_bits_inst_rs1, // @[LoopConv.scala:420:14]
output io_cmd_bits_inst_xd, // @[LoopConv.scala:420:14]
output io_cmd_bits_inst_xs1, // @[LoopConv.scala:420:14]
output io_cmd_bits_inst_xs2, // @[LoopConv.scala:420:14]
output [4:0] io_cmd_bits_inst_rd, // @[LoopConv.scala:420:14]
output [6:0] io_cmd_bits_inst_opcode, // @[LoopConv.scala:420:14]
output [63:0] io_cmd_bits_rs1, // @[LoopConv.scala:420:14]
output [63:0] io_cmd_bits_rs2, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_debug, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_cease, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_wfi, // @[LoopConv.scala:420:14]
output [31:0] io_cmd_bits_status_isa, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_dprv, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_dv, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_prv, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_v, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_sd, // @[LoopConv.scala:420:14]
output [22:0] io_cmd_bits_status_zero2, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_mpv, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_gva, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_mbe, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_sbe, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_sxl, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_uxl, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_sd_rv32, // @[LoopConv.scala:420:14]
output [7:0] io_cmd_bits_status_zero1, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_tsr, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_tw, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_tvm, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_mxr, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_sum, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_mprv, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_xs, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_fs, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_mpp, // @[LoopConv.scala:420:14]
output [1:0] io_cmd_bits_status_vs, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_spp, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_mpie, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_ube, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_spie, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_upie, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_mie, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_hie, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_sie, // @[LoopConv.scala:420:14]
output io_cmd_bits_status_uie, // @[LoopConv.scala:420:14]
output io_idle, // @[LoopConv.scala:420:14]
input io_rob_overloaded, // @[LoopConv.scala:420:14]
input io_wait_for_prev_loop, // @[LoopConv.scala:420:14]
output io_loop_id // @[LoopConv.scala:420:14]
);
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:37]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:37]
wire [13:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:37]
wire [6:0] mvin_cmd_rs2_num_cols; // @[LoopConv.scala:536:28]
wire [2:0] mvin_cmd_rs2_local_addr_norm_cmd; // @[LoopConv.scala:536:28]
wire _command_p_io_in_ready; // @[LoopConv.scala:493:25]
wire _command_p_io_out_valid; // @[LoopConv.scala:493:25]
wire [6:0] _command_p_io_out_bits_cmd_inst_funct; // @[LoopConv.scala:493:25]
wire [63:0] _command_p_io_out_bits_cmd_rs1; // @[LoopConv.scala:493:25]
wire [63:0] _command_p_io_out_bits_cmd_rs2; // @[LoopConv.scala:493:25]
wire [67:0] _command_p_io_out_bits_dram_addr; // @[LoopConv.scala:493:25]
wire [63:0] _command_p_io_out_bits_spad_addr; // @[LoopConv.scala:493:25]
wire [15:0] _command_p_io_out_bits_K; // @[LoopConv.scala:493:25]
wire [15:0] _command_p_io_out_bits_J; // @[LoopConv.scala:493:25]
wire _command_p_io_busy; // @[LoopConv.scala:493:25]
wire io_req_valid_0 = io_req_valid; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_batch_size_0 = io_req_bits_outer_bounds_batch_size; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_in_row_dim_0 = io_req_bits_outer_bounds_in_row_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_in_col_dim_0 = io_req_bits_outer_bounds_in_col_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_in_channels_0 = io_req_bits_outer_bounds_in_channels; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_out_channels_0 = io_req_bits_outer_bounds_out_channels; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_out_col_dim_0 = io_req_bits_outer_bounds_out_col_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_out_row_dim_0 = io_req_bits_outer_bounds_out_row_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_out_stride_0 = io_req_bits_outer_bounds_out_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_in_stride_0 = io_req_bits_outer_bounds_in_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_weight_stride_0 = io_req_bits_outer_bounds_weight_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_pool_out_row_dim_0 = io_req_bits_outer_bounds_pool_out_row_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_pool_out_col_dim_0 = io_req_bits_outer_bounds_pool_out_col_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_stride_0 = io_req_bits_outer_bounds_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_padding_0 = io_req_bits_outer_bounds_padding; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_kernel_dim_0 = io_req_bits_outer_bounds_kernel_dim; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_kernel_dilation_0 = io_req_bits_outer_bounds_kernel_dilation; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_pool_size_0 = io_req_bits_outer_bounds_pool_size; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_pool_stride_0 = io_req_bits_outer_bounds_pool_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_outer_bounds_pool_padding_0 = io_req_bits_outer_bounds_pool_padding; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_batches_0 = io_req_bits_inner_bounds_batches; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_porows_0 = io_req_bits_inner_bounds_porows; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_pocols_0 = io_req_bits_inner_bounds_pocols; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_pochs_0 = io_req_bits_inner_bounds_pochs; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_krows_0 = io_req_bits_inner_bounds_krows; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_kcols_0 = io_req_bits_inner_bounds_kcols; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_kchs_0 = io_req_bits_inner_bounds_kchs; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_lpad_0 = io_req_bits_inner_bounds_lpad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_rpad_0 = io_req_bits_inner_bounds_rpad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_upad_0 = io_req_bits_inner_bounds_upad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_dpad_0 = io_req_bits_inner_bounds_dpad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_plpad_0 = io_req_bits_inner_bounds_plpad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_prad_0 = io_req_bits_inner_bounds_prad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_pupad_0 = io_req_bits_inner_bounds_pupad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_pdpad_0 = io_req_bits_inner_bounds_pdpad; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_orows_0 = io_req_bits_inner_bounds_orows; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_inner_bounds_ocols_0 = io_req_bits_inner_bounds_ocols; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_ochs_0 = io_req_bits_derived_params_ochs; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_irows_0 = io_req_bits_derived_params_irows; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_icols_0 = io_req_bits_derived_params_icols; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_irows_unpadded_0 = io_req_bits_derived_params_irows_unpadded; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_icols_unpadded_0 = io_req_bits_derived_params_icols_unpadded; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_ichs_0 = io_req_bits_derived_params_ichs; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_out_channels_per_bank_0 = io_req_bits_derived_params_out_channels_per_bank; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_in_channels_per_bank_0 = io_req_bits_derived_params_in_channels_per_bank; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_bias_spad_stride_0 = io_req_bits_derived_params_bias_spad_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_input_spad_stride_0 = io_req_bits_derived_params_input_spad_stride; // @[LoopConv.scala:414:7]
wire [15:0] io_req_bits_derived_params_weight_spad_stride_0 = io_req_bits_derived_params_weight_spad_stride; // @[LoopConv.scala:414:7]
wire [14:0] io_req_bits_addr_end_0 = io_req_bits_addr_end; // @[LoopConv.scala:414:7]
wire [39:0] io_req_bits_dram_addr_0 = io_req_bits_dram_addr; // @[LoopConv.scala:414:7]
wire io_req_bits_trans_weight_1203_0 = io_req_bits_trans_weight_1203; // @[LoopConv.scala:414:7]
wire io_req_bits_trans_weight_0132_0 = io_req_bits_trans_weight_0132; // @[LoopConv.scala:414:7]
wire io_req_bits_dw_0 = io_req_bits_dw; // @[LoopConv.scala:414:7]
wire io_req_bits_loop_id_0 = io_req_bits_loop_id; // @[LoopConv.scala:414:7]
wire io_cmd_ready_0 = io_cmd_ready; // @[LoopConv.scala:414:7]
wire io_rob_overloaded_0 = io_rob_overloaded; // @[LoopConv.scala:414:7]
wire io_wait_for_prev_loop_0 = io_wait_for_prev_loop; // @[LoopConv.scala:414:7]
wire [4:0] config_cmd_rs1_pixel_repeats = 5'h1; // @[LoopConv.scala:500:28]
wire [2:0] config_cmd_rs1__spacer1 = 3'h0; // @[LoopConv.scala:500:28]
wire [2:0] config_cmd_rs1__spacer0 = 3'h0; // @[LoopConv.scala:500:28]
wire [1:0] config_cmd_rs1_state_id = 2'h1; // @[LoopConv.scala:500:28, :524:41]
wire [1:0] config_cmd_rs1__unused = 2'h1; // @[LoopConv.scala:500:28, :524:41]
wire [2:0] config_cmd_rs1_lo_lo = 3'h1; // @[LoopConv.scala:508:36]
wire [7:0] config_cmd_rs1_lo_hi_hi = 8'h8; // @[LoopConv.scala:508:36]
wire [9:0] config_cmd_rs1_lo_hi = 10'h21; // @[LoopConv.scala:508:36]
wire [12:0] config_cmd_rs1_lo = 13'h109; // @[LoopConv.scala:508:36]
wire [31:0] config_cmd_rs1_scale = 32'h3F800000; // @[LoopConv.scala:500:28]
wire [31:0] config_cmd_rs1_hi_hi_hi = 32'h3F800000; // @[LoopConv.scala:508:36]
wire [33:0] config_cmd_rs1_hi_hi = 34'hFE000000; // @[LoopConv.scala:508:36]
wire [6:0] mvin_cmd_inst_funct = 7'h1; // @[LoopConv.scala:512:22, :533:46]
wire [63:0] mvin_cmd_rs1 = 64'h0; // @[LoopConv.scala:512:22]
wire [63:0] mvin_cmd_rs2 = 64'h0; // @[LoopConv.scala:512:22]
wire [4:0] config_cmd_inst_rs2 = 5'h0; // @[LoopConv.scala:496:24]
wire [4:0] config_cmd_inst_rs1 = 5'h0; // @[LoopConv.scala:496:24]
wire [4:0] config_cmd_inst_rd = 5'h0; // @[LoopConv.scala:496:24]
wire [4:0] mvin_cmd_inst_rs2 = 5'h0; // @[LoopConv.scala:512:22]
wire [4:0] mvin_cmd_inst_rs1 = 5'h0; // @[LoopConv.scala:512:22]
wire [4:0] mvin_cmd_inst_rd = 5'h0; // @[LoopConv.scala:512:22]
wire [4:0] _command_p_io_in_bits_cmd_T_1_inst_rs2 = 5'h0; // @[LoopConv.scala:524:34]
wire [4:0] _command_p_io_in_bits_cmd_T_1_inst_rs1 = 5'h0; // @[LoopConv.scala:524:34]
wire [4:0] _command_p_io_in_bits_cmd_T_1_inst_rd = 5'h0; // @[LoopConv.scala:524:34]
wire [6:0] config_cmd_inst_funct = 7'h0; // @[LoopConv.scala:496:24]
wire [6:0] config_cmd_inst_opcode = 7'h0; // @[LoopConv.scala:496:24]
wire [6:0] mvin_cmd_inst_opcode = 7'h0; // @[LoopConv.scala:512:22]
wire [6:0] _command_p_io_in_bits_cmd_T_1_inst_opcode = 7'h0; // @[LoopConv.scala:524:34]
wire [31:0] config_cmd_status_isa = 32'h0; // @[LoopConv.scala:496:24]
wire [31:0] mvin_cmd_status_isa = 32'h0; // @[LoopConv.scala:512:22]
wire [31:0] _command_p_io_in_bits_cmd_T_1_status_isa = 32'h0; // @[LoopConv.scala:524:34]
wire [22:0] config_cmd_status_zero2 = 23'h0; // @[LoopConv.scala:496:24]
wire [22:0] mvin_cmd_status_zero2 = 23'h0; // @[LoopConv.scala:512:22]
wire [22:0] _command_p_io_in_bits_cmd_T_1_status_zero2 = 23'h0; // @[LoopConv.scala:524:34]
wire [7:0] config_cmd_status_zero1 = 8'h0; // @[LoopConv.scala:496:24]
wire [7:0] mvin_cmd_status_zero1 = 8'h0; // @[LoopConv.scala:512:22]
wire [7:0] _command_p_io_in_bits_cmd_T_1_status_zero1 = 8'h0; // @[LoopConv.scala:524:34]
wire [8:0] mvin_cmd_rs2__spacer1 = 9'h0; // @[LoopConv.scala:536:28]
wire [10:0] mvin_cmd_rs2__spacer2 = 11'h0; // @[LoopConv.scala:536:28]
wire [10:0] mvin_cmd_rs2_local_addr_garbage = 11'h0; // @[LoopConv.scala:536:28]
wire [10:0] mvin_cmd_rs2_local_addr_result_result_garbage = 11'h0; // @[LocalAddr.scala:108:26]
wire [10:0] mvin_cmd_rs2_local_addr_result_garbage = 11'h0; // @[LocalAddr.scala:116:26]
wire [1:0] config_cmd_status_dprv = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_prv = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_sxl = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_uxl = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_xs = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_fs = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_mpp = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_status_vs = 2'h0; // @[LoopConv.scala:435:22, :496:24]
wire [1:0] config_cmd_rs1__spacer2 = 2'h0; // @[LoopConv.scala:435:22, :500:28]
wire [1:0] mvin_cmd_status_dprv = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_prv = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_sxl = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_uxl = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_xs = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_fs = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_mpp = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] mvin_cmd_status_vs = 2'h0; // @[LoopConv.scala:435:22, :512:22]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_dprv = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_prv = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_sxl = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_uxl = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_xs = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_fs = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_mpp = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_vs = 2'h0; // @[LoopConv.scala:435:22, :524:34]
wire [1:0] io_cmd_bits_rs2_hi_hi = 2'h0; // @[LoopConv.scala:435:22, :541:37]
wire config_cmd_inst_xd = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_inst_xs1 = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_inst_xs2 = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_debug = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_cease = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_wfi = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_dv = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_v = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_sd = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_mpv = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_gva = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_mbe = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_sbe = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_sd_rv32 = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_tsr = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_tw = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_tvm = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_mxr = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_sum = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_mprv = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_spp = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_mpie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_ube = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_spie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_upie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_mie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_hie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_sie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_status_uie = 1'h0; // @[LoopConv.scala:496:24]
wire config_cmd_rs1_shrink = 1'h0; // @[LoopConv.scala:500:28]
wire mvin_cmd_inst_xd = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_inst_xs1 = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_inst_xs2 = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_debug = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_cease = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_wfi = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_dv = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_v = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_sd = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_mpv = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_gva = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_mbe = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_sbe = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_sd_rv32 = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_tsr = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_tw = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_tvm = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_mxr = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_sum = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_mprv = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_spp = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_mpie = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_ube = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_spie = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_upie = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_mie = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_hie = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_sie = 1'h0; // @[LoopConv.scala:512:22]
wire mvin_cmd_status_uie = 1'h0; // @[LoopConv.scala:512:22]
wire _io_req_ready_T_2; // @[LoopConv.scala:519:34]
wire _command_p_io_in_bits_cmd_T_1_inst_xd = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_inst_xs1 = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_inst_xs2 = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_debug = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_cease = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_wfi = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_dv = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_v = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_sd = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_mpv = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_gva = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_mbe = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_sbe = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_sd_rv32 = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_tsr = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_tw = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_tvm = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_mxr = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_sum = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_mprv = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_spp = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_mpie = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_ube = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_spie = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_upie = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_mie = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_hie = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_sie = 1'h0; // @[LoopConv.scala:524:34]
wire _command_p_io_in_bits_cmd_T_1_status_uie = 1'h0; // @[LoopConv.scala:524:34]
wire mvin_cmd_rs2_local_addr_is_acc_addr = 1'h0; // @[LoopConv.scala:536:28]
wire mvin_cmd_rs2_local_addr_accumulate = 1'h0; // @[LoopConv.scala:536:28]
wire mvin_cmd_rs2_local_addr_read_full_acc_row = 1'h0; // @[LoopConv.scala:536:28]
wire mvin_cmd_rs2_local_addr_result_is_acc_addr = 1'h0; // @[LocalAddr.scala:116:26]
wire mvin_cmd_rs2_local_addr_result_accumulate = 1'h0; // @[LocalAddr.scala:116:26]
wire mvin_cmd_rs2_local_addr_result_read_full_acc_row = 1'h0; // @[LocalAddr.scala:116:26]
wire _next_kch_T_2 = 1'h0; // @[Util.scala:42:8]
wire _io_cmd_valid_T_1; // @[LoopConv.scala:531:42]
wire _io_idle_T_2; // @[LoopConv.scala:520:29]
wire io_req_ready_0; // @[LoopConv.scala:414:7]
wire [6:0] io_cmd_bits_inst_funct_0; // @[LoopConv.scala:414:7]
wire [4:0] io_cmd_bits_inst_rs2_0; // @[LoopConv.scala:414:7]
wire [4:0] io_cmd_bits_inst_rs1_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_inst_xd_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_inst_xs1_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_inst_xs2_0; // @[LoopConv.scala:414:7]
wire [4:0] io_cmd_bits_inst_rd_0; // @[LoopConv.scala:414:7]
wire [6:0] io_cmd_bits_inst_opcode_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_debug_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_cease_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_wfi_0; // @[LoopConv.scala:414:7]
wire [31:0] io_cmd_bits_status_isa_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_dprv_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_dv_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_prv_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_v_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_sd_0; // @[LoopConv.scala:414:7]
wire [22:0] io_cmd_bits_status_zero2_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_mpv_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_gva_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_mbe_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_sbe_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_sxl_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_uxl_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_sd_rv32_0; // @[LoopConv.scala:414:7]
wire [7:0] io_cmd_bits_status_zero1_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_tsr_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_tw_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_tvm_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_mxr_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_sum_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_mprv_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_xs_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_fs_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_mpp_0; // @[LoopConv.scala:414:7]
wire [1:0] io_cmd_bits_status_vs_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_spp_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_mpie_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_ube_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_spie_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_upie_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_mie_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_hie_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_sie_0; // @[LoopConv.scala:414:7]
wire io_cmd_bits_status_uie_0; // @[LoopConv.scala:414:7]
wire [63:0] io_cmd_bits_rs1_0; // @[LoopConv.scala:414:7]
wire [63:0] io_cmd_bits_rs2_0; // @[LoopConv.scala:414:7]
wire io_cmd_valid_0; // @[LoopConv.scala:414:7]
wire io_idle_0; // @[LoopConv.scala:414:7]
wire io_loop_id_0; // @[LoopConv.scala:414:7]
reg [1:0] state; // @[LoopConv.scala:435:22]
reg [15:0] req_outer_bounds_batch_size; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_in_row_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_in_col_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_in_channels; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_out_channels; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_out_col_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_out_row_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_out_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_in_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_weight_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_pool_out_row_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_pool_out_col_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_padding; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_kernel_dim; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_kernel_dilation; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_pool_size; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_pool_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_outer_bounds_pool_padding; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_batches; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_porows; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_pocols; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_pochs; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_krows; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_kcols; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_kchs; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_lpad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_rpad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_upad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_dpad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_plpad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_prad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_pupad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_pdpad; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_orows; // @[LoopConv.scala:437:16]
reg [15:0] req_inner_bounds_ocols; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_ochs; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_irows; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_icols; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_irows_unpadded; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_icols_unpadded; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_ichs; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_out_channels_per_bank; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_in_channels_per_bank; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_bias_spad_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_input_spad_stride; // @[LoopConv.scala:437:16]
reg [15:0] req_derived_params_weight_spad_stride; // @[LoopConv.scala:437:16]
reg [14:0] req_addr_end; // @[LoopConv.scala:437:16]
reg [39:0] req_dram_addr; // @[LoopConv.scala:437:16]
reg req_trans_weight_1203; // @[LoopConv.scala:437:16]
reg req_trans_weight_0132; // @[LoopConv.scala:437:16]
reg req_dw; // @[LoopConv.scala:437:16]
reg req_loop_id; // @[LoopConv.scala:437:16]
assign io_loop_id_0 = req_loop_id; // @[LoopConv.scala:414:7, :437:16]
wire _max_chs_per_mvin_max_ochs_per_mvin_T = req_derived_params_ochs < 16'h40; // @[LoopConv.scala:437:16, :444:38]
wire [15:0] max_chs_per_mvin_max_ochs_per_mvin = _max_chs_per_mvin_max_ochs_per_mvin_T ? req_derived_params_ochs : 16'h40; // @[LoopConv.scala:437:16, :444:{32,38}]
wire _max_chs_per_mvin_max_kchs_per_mvin_T = req_inner_bounds_kchs < 16'h40; // @[LoopConv.scala:437:16, :444:38, :445:38]
wire [15:0] max_chs_per_mvin_max_kchs_per_mvin = _max_chs_per_mvin_max_kchs_per_mvin_T ? req_inner_bounds_kchs : 16'h40; // @[LoopConv.scala:437:16, :444:38, :445:{32,38}]
wire [15:0] max_chs_per_mvin = req_trans_weight_0132 ? max_chs_per_mvin_max_kchs_per_mvin : max_chs_per_mvin_max_ochs_per_mvin; // @[LoopConv.scala:437:16, :444:32, :445:32, :446:8]
wire [31:0] _GEN = {16'h0, req_inner_bounds_kcols}; // @[LoopConv.scala:437:16, :449:64]
wire [31:0] _B_rows_T = {16'h0, req_derived_params_in_channels_per_bank} * _GEN; // @[LoopConv.scala:437:16, :449:64]
wire [47:0] _GEN_0 = {32'h0, req_inner_bounds_krows}; // @[LoopConv.scala:437:16, :449:72]
wire [47:0] _B_rows_T_1 = {16'h0, _B_rows_T} * _GEN_0; // @[LoopConv.scala:449:{64,72}]
wire [63:0] _GEN_1 = {48'h0, req_derived_params_ochs}; // @[LoopConv.scala:437:16, :449:80]
wire [63:0] _B_rows_T_2 = {16'h0, _B_rows_T_1} * _GEN_1; // @[LoopConv.scala:449:{72,80}]
wire [31:0] _B_rows_T_3 = {16'h0, req_derived_params_out_channels_per_bank} * _GEN; // @[LoopConv.scala:437:16, :449:64, :450:27]
wire [47:0] _B_rows_T_4 = {16'h0, _B_rows_T_3} * _GEN_0; // @[LoopConv.scala:449:72, :450:{27,35}]
wire [63:0] _GEN_2 = {48'h0, req_inner_bounds_kchs}; // @[LoopConv.scala:437:16, :449:80, :450:43]
wire [63:0] _B_rows_T_5 = {16'h0, _B_rows_T_4} * _GEN_2; // @[LoopConv.scala:450:{35,43}]
wire [63:0] B_rows = req_trans_weight_0132 ? _B_rows_T_2 : _B_rows_T_5; // @[LoopConv.scala:437:16, :449:{19,80}, :450:43]
wire [64:0] _addr_start_T = {50'h0, req_addr_end} - {1'h0, B_rows}; // @[LoopConv.scala:437:16, :449:19, :451:33]
wire [63:0] addr_start = _addr_start_T[63:0]; // @[LoopConv.scala:451:33]
wire [31:0] _GEN_3 = {16'h0, req_outer_bounds_kernel_dim}; // @[LoopConv.scala:437:16, :455:42]
wire [31:0] _dram_stride_T = _GEN_3 * _GEN_3; // @[LoopConv.scala:455:42]
wire [47:0] _GEN_4 = {32'h0, req_outer_bounds_out_channels}; // @[LoopConv.scala:437:16, :455:55]
wire [47:0] _dram_stride_T_1 = {16'h0, _dram_stride_T} * _GEN_4; // @[LoopConv.scala:455:{42,55}]
wire [15:0] _dram_stride_T_2 = req_trans_weight_0132 ? req_outer_bounds_in_channels : req_outer_bounds_weight_stride; // @[Mux.scala:126:16]
wire [47:0] _dram_stride_T_3 = req_trans_weight_1203 ? _dram_stride_T_1 : {32'h0, _dram_stride_T_2}; // @[Mux.scala:126:16]
wire [47:0] _dram_stride_T_4 = req_dw ? 48'h1 : _dram_stride_T_3; // @[Mux.scala:126:16]
wire [48:0] dram_stride = {1'h0, _dram_stride_T_4}; // @[Mux.scala:126:16]
reg [15:0] och; // @[LoopConv.scala:460:16]
reg [15:0] krow; // @[LoopConv.scala:461:17]
reg [15:0] kcol; // @[LoopConv.scala:462:17]
reg [15:0] kch; // @[LoopConv.scala:463:16]
wire [31:0] _GEN_5 = {16'h0, krow}; // @[LoopConv.scala:461:17, :466:35]
wire [31:0] _GEN_6 = _GEN_5 * _GEN_3; // @[LoopConv.scala:455:42, :466:35]
wire [31:0] _dram_offset_T; // @[LoopConv.scala:466:35]
assign _dram_offset_T = _GEN_6; // @[LoopConv.scala:466:35]
wire [31:0] _dram_offset_T_8; // @[LoopConv.scala:467:21]
assign _dram_offset_T_8 = _GEN_6; // @[LoopConv.scala:466:35, :467:21]
wire [31:0] _dram_offset_T_13; // @[LoopConv.scala:468:66]
assign _dram_offset_T_13 = _GEN_6; // @[LoopConv.scala:466:35, :468:66]
wire [31:0] _dram_offset_T_19; // @[LoopConv.scala:469:37]
assign _dram_offset_T_19 = _GEN_6; // @[LoopConv.scala:466:35, :469:37]
wire [47:0] _dram_offset_T_1 = {16'h0, _dram_offset_T} * {32'h0, req_outer_bounds_in_channels}; // @[LoopConv.scala:437:16, :466:{35,46}]
wire [31:0] _GEN_7 = {16'h0, kcol}; // @[LoopConv.scala:462:17, :466:66]
wire [31:0] _dram_offset_T_2 = _GEN_7 * {16'h0, req_outer_bounds_in_channels}; // @[LoopConv.scala:437:16, :466:66]
wire [48:0] _dram_offset_T_3 = {1'h0, _dram_offset_T_1} + {17'h0, _dram_offset_T_2}; // @[LoopConv.scala:466:{46,59,66}]
wire [49:0] _dram_offset_T_4 = {1'h0, _dram_offset_T_3} + {34'h0, kch}; // @[LoopConv.scala:463:16, :466:{59,79}]
wire [65:0] _dram_offset_T_5 = {16'h0, _dram_offset_T_4} * {50'h0, req_outer_bounds_weight_stride}; // @[LoopConv.scala:437:16, :451:33, :466:{79,87}]
wire [66:0] _GEN_8 = {51'h0, och}; // @[LoopConv.scala:460:16, :466:103]
wire [66:0] _dram_offset_T_6 = {1'h0, _dram_offset_T_5} + _GEN_8; // @[LoopConv.scala:466:{87,103}]
wire [67:0] _dram_offset_T_7 = {1'h0, _dram_offset_T_6}; // @[LoopConv.scala:466:{103,111}]
wire [32:0] _dram_offset_T_9 = {1'h0, _dram_offset_T_8} + {17'h0, kcol}; // @[LoopConv.scala:462:17, :466:59, :467:{21,34}]
wire [33:0] _dram_offset_T_10 = {1'h0, _dram_offset_T_9}; // @[LoopConv.scala:467:{34,43}]
wire [31:0] _dram_offset_T_11 = {16'h0, kch} * _GEN_3; // @[LoopConv.scala:455:42, :463:16, :468:36]
wire [47:0] _dram_offset_T_12 = {16'h0, _dram_offset_T_11} * {32'h0, req_outer_bounds_kernel_dim}; // @[LoopConv.scala:437:16, :468:{36,47}]
wire [48:0] _dram_offset_T_14 = {1'h0, _dram_offset_T_12} + {17'h0, _dram_offset_T_13}; // @[LoopConv.scala:466:59, :468:{47,59,66}]
wire [49:0] _dram_offset_T_15 = {1'h0, _dram_offset_T_14} + {34'h0, kcol}; // @[LoopConv.scala:462:17, :466:79, :468:{59,78}]
wire [65:0] _dram_offset_T_16 = {16'h0, _dram_offset_T_15} * {50'h0, req_outer_bounds_out_channels}; // @[LoopConv.scala:437:16, :451:33, :468:{78,87}]
wire [66:0] _dram_offset_T_17 = {1'h0, _dram_offset_T_16} + _GEN_8; // @[LoopConv.scala:466:103, :468:{87,102}]
wire [67:0] _dram_offset_T_18 = {1'h0, _dram_offset_T_17}; // @[LoopConv.scala:468:{102,110}]
wire [47:0] _dram_offset_T_20 = {16'h0, _dram_offset_T_19} * _GEN_4; // @[LoopConv.scala:455:55, :469:{37,48}]
wire [31:0] _dram_offset_T_21 = _GEN_7 * {16'h0, req_outer_bounds_out_channels}; // @[LoopConv.scala:437:16, :466:66, :469:69]
wire [48:0] _dram_offset_T_22 = {1'h0, _dram_offset_T_20} + {17'h0, _dram_offset_T_21}; // @[LoopConv.scala:466:59, :469:{48,62,69}]
wire [49:0] _dram_offset_T_23 = {1'h0, _dram_offset_T_22} + {34'h0, och}; // @[LoopConv.scala:460:16, :466:79, :469:{62,83}]
wire [65:0] _dram_offset_T_24 = {16'h0, _dram_offset_T_23} * {50'h0, req_outer_bounds_in_channels}; // @[LoopConv.scala:437:16, :451:33, :469:{83,91}]
wire [66:0] _dram_offset_T_25 = {1'h0, _dram_offset_T_24} + {51'h0, kch}; // @[LoopConv.scala:463:16, :466:103, :469:{91,105}]
wire [67:0] _dram_offset_T_26 = {1'h0, _dram_offset_T_25}; // @[LoopConv.scala:469:{105,113}]
wire [67:0] _dram_offset_T_27 = req_trans_weight_0132 ? _dram_offset_T_26 : _dram_offset_T_7; // @[Mux.scala:126:16]
wire [67:0] _dram_offset_T_28 = req_trans_weight_1203 ? _dram_offset_T_18 : _dram_offset_T_27; // @[Mux.scala:126:16]
wire [67:0] dram_offset = req_dw ? {34'h0, _dram_offset_T_10} : _dram_offset_T_28; // @[Mux.scala:126:16]
wire [67:0] _dram_addr_T = {36'h0, dram_offset[31:0]}; // @[Mux.scala:126:16]
wire [68:0] _dram_addr_T_1 = {29'h0, req_dram_addr} + {1'h0, _dram_addr_T}; // @[LoopConv.scala:437:16, :471:33, :1556:17]
wire [67:0] dram_addr = _dram_addr_T_1[67:0]; // @[LoopConv.scala:471:33]
wire [15:0] _spad_addr_T = kch / 16'h10; // @[LoopConv.scala:463:16, :475:23, :483:20]
wire [31:0] _GEN_9 = {16'h0, req_inner_bounds_krows}; // @[LoopConv.scala:437:16, :475:55]
wire [31:0] _spad_addr_T_1 = {16'h0, _spad_addr_T} * _GEN_9; // @[LoopConv.scala:475:{23,55}]
wire [47:0] _GEN_10 = {32'h0, req_inner_bounds_kcols}; // @[LoopConv.scala:437:16, :475:63]
wire [47:0] _spad_addr_T_2 = {16'h0, _spad_addr_T_1} * _GEN_10; // @[LoopConv.scala:475:{55,63}]
wire [63:0] _spad_addr_T_3 = {16'h0, _spad_addr_T_2} * _GEN_1; // @[LoopConv.scala:449:80, :475:{63,71}]
wire [64:0] _GEN_11 = {1'h0, addr_start}; // @[LoopConv.scala:451:33, :475:16]
wire [64:0] _spad_addr_T_4 = _GEN_11 + {1'h0, _spad_addr_T_3}; // @[LoopConv.scala:475:{16,71}]
wire [63:0] _spad_addr_T_5 = _spad_addr_T_4[63:0]; // @[LoopConv.scala:475:16]
wire [31:0] _GEN_12 = _GEN_5 * _GEN; // @[LoopConv.scala:449:64, :466:35, :475:85]
wire [31:0] _spad_addr_T_6; // @[LoopConv.scala:475:85]
assign _spad_addr_T_6 = _GEN_12; // @[LoopConv.scala:475:85]
wire [31:0] _spad_addr_T_21; // @[LoopConv.scala:476:85]
assign _spad_addr_T_21 = _GEN_12; // @[LoopConv.scala:475:85, :476:85]
wire [47:0] _spad_addr_T_7 = {16'h0, _spad_addr_T_6} * {32'h0, req_derived_params_ochs}; // @[LoopConv.scala:437:16, :475:{85,93}]
wire [64:0] _spad_addr_T_8 = {1'h0, _spad_addr_T_5} + {17'h0, _spad_addr_T_7}; // @[LoopConv.scala:466:59, :475:{16,78,93}]
wire [63:0] _spad_addr_T_9 = _spad_addr_T_8[63:0]; // @[LoopConv.scala:475:78]
wire [31:0] _spad_addr_T_10 = _GEN_7 * {16'h0, req_derived_params_ochs}; // @[LoopConv.scala:437:16, :466:66, :475:107]
wire [64:0] _spad_addr_T_11 = {1'h0, _spad_addr_T_9} + {33'h0, _spad_addr_T_10}; // @[LoopConv.scala:467:43, :475:{78,100,107}]
wire [63:0] _spad_addr_T_12 = _spad_addr_T_11[63:0]; // @[LoopConv.scala:475:100]
wire [64:0] _spad_addr_T_13 = {1'h0, _spad_addr_T_12} + {49'h0, och}; // @[LoopConv.scala:460:16, :475:{100,114}]
wire [63:0] _spad_addr_T_14 = _spad_addr_T_13[63:0]; // @[LoopConv.scala:475:114]
wire [15:0] _spad_addr_T_15 = och / 16'h10; // @[LoopConv.scala:460:16, :476:23, :483:20]
wire [31:0] _spad_addr_T_16 = {16'h0, _spad_addr_T_15} * _GEN_9; // @[LoopConv.scala:475:55, :476:{23,55}]
wire [47:0] _spad_addr_T_17 = {16'h0, _spad_addr_T_16} * _GEN_10; // @[LoopConv.scala:475:63, :476:{55,63}]
wire [63:0] _spad_addr_T_18 = {16'h0, _spad_addr_T_17} * _GEN_2; // @[LoopConv.scala:450:43, :476:{63,71}]
wire [64:0] _spad_addr_T_19 = _GEN_11 + {1'h0, _spad_addr_T_18}; // @[LoopConv.scala:475:16, :476:{16,71}]
wire [63:0] _spad_addr_T_20 = _spad_addr_T_19[63:0]; // @[LoopConv.scala:476:16]
wire [47:0] _spad_addr_T_22 = {16'h0, _spad_addr_T_21} * {32'h0, req_inner_bounds_kchs}; // @[LoopConv.scala:437:16, :476:{85,93}]
wire [64:0] _spad_addr_T_23 = {1'h0, _spad_addr_T_20} + {17'h0, _spad_addr_T_22}; // @[LoopConv.scala:466:59, :476:{16,78,93}]
wire [63:0] _spad_addr_T_24 = _spad_addr_T_23[63:0]; // @[LoopConv.scala:476:78]
wire [31:0] _spad_addr_T_25 = _GEN_7 * {16'h0, req_inner_bounds_kchs}; // @[LoopConv.scala:437:16, :466:66, :476:107]
wire [64:0] _spad_addr_T_26 = {1'h0, _spad_addr_T_24} + {33'h0, _spad_addr_T_25}; // @[LoopConv.scala:467:43, :476:{78,100,107}]
wire [63:0] _spad_addr_T_27 = _spad_addr_T_26[63:0]; // @[LoopConv.scala:476:100]
wire [64:0] _spad_addr_T_28 = {1'h0, _spad_addr_T_27} + {49'h0, kch}; // @[LoopConv.scala:463:16, :475:114, :476:{100,114}]
wire [63:0] _spad_addr_T_29 = _spad_addr_T_28[63:0]; // @[LoopConv.scala:476:114]
wire [63:0] spad_addr = req_trans_weight_0132 ? _spad_addr_T_14 : _spad_addr_T_29; // @[LoopConv.scala:437:16, :473:22, :475:114, :476:114]
wire [16:0] _GEN_13 = {1'h0, req_inner_bounds_kchs}; // @[LoopConv.scala:437:16, :480:14]
wire [16:0] _GEN_14 = {1'h0, kch}; // @[LoopConv.scala:463:16, :480:14]
wire [16:0] _GEN_15 = _GEN_13 - _GEN_14; // @[LoopConv.scala:480:14]
wire [16:0] _J_T; // @[LoopConv.scala:480:14]
assign _J_T = _GEN_15; // @[LoopConv.scala:480:14]
wire [16:0] _J_T_3; // @[LoopConv.scala:480:63]
assign _J_T_3 = _GEN_15; // @[LoopConv.scala:480:{14,63}]
wire [16:0] _K_T_6; // @[LoopConv.scala:484:14]
assign _K_T_6 = _GEN_15; // @[LoopConv.scala:480:14, :484:14]
wire [16:0] _K_T_9; // @[LoopConv.scala:484:55]
assign _K_T_9 = _GEN_15; // @[LoopConv.scala:480:14, :484:55]
wire [15:0] _J_T_1 = _J_T[15:0]; // @[LoopConv.scala:480:14]
wire _J_T_2 = _J_T_1 > max_chs_per_mvin; // @[LoopConv.scala:446:8, :480:{14,20}]
wire [15:0] _J_T_4 = _J_T_3[15:0]; // @[LoopConv.scala:480:63]
wire [15:0] _J_T_5 = _J_T_2 ? max_chs_per_mvin : _J_T_4; // @[LoopConv.scala:446:8, :480:{8,20,63}]
wire [16:0] _GEN_16 = {1'h0, req_derived_params_ochs}; // @[LoopConv.scala:437:16, :481:14]
wire [16:0] _GEN_17 = {1'h0, och}; // @[LoopConv.scala:460:16, :481:14]
wire [16:0] _GEN_18 = _GEN_16 - _GEN_17; // @[LoopConv.scala:481:14]
wire [16:0] _J_T_6; // @[LoopConv.scala:481:14]
assign _J_T_6 = _GEN_18; // @[LoopConv.scala:481:14]
wire [16:0] _J_T_9; // @[LoopConv.scala:481:63]
assign _J_T_9 = _GEN_18; // @[LoopConv.scala:481:{14,63}]
wire [16:0] _K_T; // @[LoopConv.scala:483:14]
assign _K_T = _GEN_18; // @[LoopConv.scala:481:14, :483:14]
wire [16:0] _K_T_3; // @[LoopConv.scala:483:55]
assign _K_T_3 = _GEN_18; // @[LoopConv.scala:481:14, :483:55]
wire [15:0] _J_T_7 = _J_T_6[15:0]; // @[LoopConv.scala:481:14]
wire _J_T_8 = _J_T_7 > max_chs_per_mvin; // @[LoopConv.scala:446:8, :481:{14,20}]
wire [15:0] _J_T_10 = _J_T_9[15:0]; // @[LoopConv.scala:481:63]
wire [15:0] _J_T_11 = _J_T_8 ? max_chs_per_mvin : _J_T_10; // @[LoopConv.scala:446:8, :481:{8,20,63}]
wire [15:0] J = req_trans_weight_0132 ? _J_T_5 : _J_T_11; // @[LoopConv.scala:437:16, :479:14, :480:8, :481:8]
wire [15:0] _K_T_1 = _K_T[15:0]; // @[LoopConv.scala:483:14]
wire _K_T_2 = _K_T_1 > 16'h10; // @[LoopConv.scala:483:{14,20}]
wire [15:0] _K_T_4 = _K_T_3[15:0]; // @[LoopConv.scala:483:55]
wire [15:0] _K_T_5 = _K_T_2 ? 16'h10 : _K_T_4; // @[LoopConv.scala:483:{8,20,55}]
wire [15:0] _K_T_7 = _K_T_6[15:0]; // @[LoopConv.scala:484:14]
wire _K_T_8 = _K_T_7 > 16'h10; // @[LoopConv.scala:483:20, :484:{14,20}]
wire [15:0] _K_T_10 = _K_T_9[15:0]; // @[LoopConv.scala:484:55]
wire [15:0] _K_T_11 = _K_T_8 ? 16'h10 : _K_T_10; // @[LoopConv.scala:483:20, :484:{8,20,55}]
wire [15:0] K = req_trans_weight_0132 ? _K_T_5 : _K_T_11; // @[LoopConv.scala:437:16, :482:14, :483:8, :484:8]
wire [63:0] _config_cmd_rs1_T; // @[LoopConv.scala:508:36]
wire [63:0] config_cmd_rs1; // @[LoopConv.scala:496:24]
wire [63:0] config_cmd_rs2; // @[LoopConv.scala:496:24]
wire [13:0] config_cmd_rs1_stride; // @[LoopConv.scala:500:28]
assign config_cmd_rs1_stride = req_derived_params_weight_spad_stride[13:0]; // @[LoopConv.scala:437:16, :500:28, :503:25]
wire [16:0] config_cmd_rs1_hi_lo = {config_cmd_rs1_stride, 3'h0}; // @[LoopConv.scala:500:28, :508:36]
wire [50:0] config_cmd_rs1_hi = {34'hFE000000, config_cmd_rs1_hi_lo}; // @[LoopConv.scala:508:36]
assign _config_cmd_rs1_T = {config_cmd_rs1_hi, 13'h109}; // @[LoopConv.scala:508:36]
assign config_cmd_rs1 = _config_cmd_rs1_T; // @[LoopConv.scala:496:24, :508:36]
assign config_cmd_rs2 = {15'h0, dram_stride}; // @[LoopConv.scala:457:6, :496:24, :510:18]
wire _io_req_ready_T = ~(|state); // @[LoopConv.scala:435:22, :519:25]
wire _io_req_ready_T_1 = ~_command_p_io_busy; // @[LoopConv.scala:493:25, :519:37]
assign _io_req_ready_T_2 = _io_req_ready_T & _io_req_ready_T_1; // @[LoopConv.scala:519:{25,34,37}]
assign io_req_ready_0 = _io_req_ready_T_2; // @[LoopConv.scala:414:7, :519:34]
wire _io_idle_T = ~(|state); // @[LoopConv.scala:435:22, :519:25, :520:20]
wire _io_idle_T_1 = ~_command_p_io_busy; // @[LoopConv.scala:493:25, :519:37, :520:32]
assign _io_idle_T_2 = _io_idle_T & _io_idle_T_1; // @[LoopConv.scala:520:{20,29,32}]
assign io_idle_0 = _io_idle_T_2; // @[LoopConv.scala:414:7, :520:29]
wire _command_p_io_in_valid_T = |state; // @[LoopConv.scala:435:22, :519:25, :523:34]
wire _command_p_io_in_valid_T_1 = ~io_wait_for_prev_loop_0; // @[LoopConv.scala:414:7, :523:46]
wire _command_p_io_in_valid_T_2 = _command_p_io_in_valid_T & _command_p_io_in_valid_T_1; // @[LoopConv.scala:523:{34,43,46}]
wire _command_p_io_in_valid_T_3 = |req_dram_addr; // @[LoopConv.scala:437:16, :523:87]
wire _command_p_io_in_valid_T_4 = _command_p_io_in_valid_T_2 & _command_p_io_in_valid_T_3; // @[LoopConv.scala:523:{43,69,87}]
wire _command_p_io_in_bits_cmd_T = state == 2'h1; // @[LoopConv.scala:435:22, :524:41]
wire [6:0] _command_p_io_in_bits_cmd_T_1_inst_funct = {6'h0, ~_command_p_io_in_bits_cmd_T}; // @[LoopConv.scala:524:{34,41}]
wire [63:0] _command_p_io_in_bits_cmd_T_1_rs1 = _command_p_io_in_bits_cmd_T ? config_cmd_rs1 : 64'h0; // @[LoopConv.scala:496:24, :524:{34,41}]
wire [63:0] _command_p_io_in_bits_cmd_T_1_rs2 = _command_p_io_in_bits_cmd_T ? config_cmd_rs2 : 64'h0; // @[LoopConv.scala:496:24, :524:{34,41}]
wire _command_p_io_out_ready_T = ~io_rob_overloaded_0; // @[LoopConv.scala:414:7, :530:45]
wire _command_p_io_out_ready_T_1 = io_cmd_ready_0 & _command_p_io_out_ready_T; // @[LoopConv.scala:414:7, :530:{42,45}]
wire _io_cmd_valid_T = ~io_rob_overloaded_0; // @[LoopConv.scala:414:7, :530:45, :531:45]
assign _io_cmd_valid_T_1 = _command_p_io_out_valid & _io_cmd_valid_T; // @[LoopConv.scala:493:25, :531:{42,45}]
assign io_cmd_valid_0 = _io_cmd_valid_T_1; // @[LoopConv.scala:414:7, :531:42]
wire _T = _command_p_io_out_bits_cmd_inst_funct == 7'h1; // @[LoopConv.scala:493:25, :533:46]
assign io_cmd_bits_rs1_0 = _T ? _command_p_io_out_bits_dram_addr[63:0] : _command_p_io_out_bits_cmd_rs1; // @[LoopConv.scala:414:7, :493:25, :532:15, :533:{46,61}, :535:21]
wire [6:0] io_cmd_bits_rs2_lo_hi_1 = mvin_cmd_rs2_num_cols; // @[LoopConv.scala:536:28, :541:37]
wire [2:0] mvin_cmd_rs2_local_addr_result_norm_cmd; // @[LocalAddr.scala:116:26]
wire [2:0] _io_cmd_bits_rs2_T = mvin_cmd_rs2_local_addr_norm_cmd; // @[LoopConv.scala:536:28, :541:37]
wire mvin_cmd_rs2_local_addr_result_garbage_bit; // @[LocalAddr.scala:116:26]
wire [13:0] mvin_cmd_rs2_local_addr_result_data; // @[LocalAddr.scala:116:26]
wire mvin_cmd_rs2_local_addr_garbage_bit; // @[LoopConv.scala:536:28]
wire [13:0] mvin_cmd_rs2_local_addr_data; // @[LoopConv.scala:536:28]
wire [4:0] mvin_cmd_rs2_num_rows; // @[LoopConv.scala:536:28]
assign mvin_cmd_rs2_num_rows = _command_p_io_out_bits_K[4:0]; // @[LoopConv.scala:493:25, :536:28, :538:27]
assign mvin_cmd_rs2_num_cols = _command_p_io_out_bits_J[6:0]; // @[LoopConv.scala:493:25, :536:28, :539:27]
wire _mvin_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_is_acc_addr = _mvin_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:{26,37}]
wire _mvin_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_accumulate = _mvin_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:{26,37}]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_read_full_acc_row = _mvin_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:{26,37}]
wire [10:0] _mvin_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [2:0] mvin_cmd_rs2_local_addr_result_result_norm_cmd = _mvin_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:{26,37}]
wire _mvin_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
wire [13:0] _mvin_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_garbage_bit = _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:{26,37}]
wire [13:0] mvin_cmd_rs2_local_addr_result_result_data = _mvin_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:{26,37}]
wire [31:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_1 = _command_p_io_out_bits_spad_addr[31:0]; // @[LoopConv.scala:493:25]
assign _mvin_cmd_rs2_local_addr_result_result_T = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[13:0]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_data = _mvin_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_1 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[14]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage_bit = _mvin_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_2 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[25:15]; // @[LocalAddr.scala:108:37]
wire [10:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage = _mvin_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_T_3 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[28:26]; // @[LocalAddr.scala:108:37]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_2 = _mvin_cmd_rs2_local_addr_result_result_T_3; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_3 = _mvin_cmd_rs2_local_addr_result_result_WIRE_2; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_norm_cmd = _mvin_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_4 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[29]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row = _mvin_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_5 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[30]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_accumulate = _mvin_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_6 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[31]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr = _mvin_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
assign mvin_cmd_rs2_local_addr_result_norm_cmd = mvin_cmd_rs2_local_addr_result_result_norm_cmd; // @[LocalAddr.scala:108:26, :116:26]
assign mvin_cmd_rs2_local_addr_result_garbage_bit = mvin_cmd_rs2_local_addr_result_result_garbage_bit; // @[LocalAddr.scala:108:26, :116:26]
assign mvin_cmd_rs2_local_addr_result_data = mvin_cmd_rs2_local_addr_result_result_data; // @[LocalAddr.scala:108:26, :116:26]
assign mvin_cmd_rs2_local_addr_norm_cmd = mvin_cmd_rs2_local_addr_result_norm_cmd; // @[LoopConv.scala:536:28]
assign mvin_cmd_rs2_local_addr_garbage_bit = mvin_cmd_rs2_local_addr_result_garbage_bit; // @[LoopConv.scala:536:28]
assign mvin_cmd_rs2_local_addr_data = mvin_cmd_rs2_local_addr_result_data; // @[LoopConv.scala:536:28]
wire [11:0] io_cmd_bits_rs2_lo_hi = {11'h0, mvin_cmd_rs2_local_addr_garbage_bit}; // @[LoopConv.scala:536:28, :541:37]
wire [25:0] io_cmd_bits_rs2_lo = {io_cmd_bits_rs2_lo_hi, mvin_cmd_rs2_local_addr_data}; // @[LoopConv.scala:536:28, :541:37]
wire [3:0] io_cmd_bits_rs2_hi_lo = {1'h0, _io_cmd_bits_rs2_T}; // @[LoopConv.scala:541:37]
wire [5:0] io_cmd_bits_rs2_hi = {2'h0, io_cmd_bits_rs2_hi_lo}; // @[LoopConv.scala:435:22, :541:37]
wire [31:0] _io_cmd_bits_rs2_T_1 = {io_cmd_bits_rs2_hi, io_cmd_bits_rs2_lo}; // @[LoopConv.scala:541:37]
wire [38:0] io_cmd_bits_rs2_lo_1 = {io_cmd_bits_rs2_lo_hi_1, _io_cmd_bits_rs2_T_1}; // @[LoopConv.scala:541:37]
wire [15:0] io_cmd_bits_rs2_hi_hi_1 = {11'h0, mvin_cmd_rs2_num_rows}; // @[LoopConv.scala:536:28, :541:37]
wire [24:0] io_cmd_bits_rs2_hi_1 = {io_cmd_bits_rs2_hi_hi_1, 9'h0}; // @[LoopConv.scala:541:37]
wire [63:0] _io_cmd_bits_rs2_T_2 = {io_cmd_bits_rs2_hi_1, io_cmd_bits_rs2_lo_1}; // @[LoopConv.scala:541:37]
assign io_cmd_bits_rs2_0 = _T ? _io_cmd_bits_rs2_T_2 : _command_p_io_out_bits_cmd_rs2; // @[LoopConv.scala:414:7, :493:25, :532:15, :533:{46,61}, :541:{21,37}]
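  // When the outgoing funct field is 1 (_T above), this is the mvin path: rs1 is
  // overridden with the computed DRAM address and rs2 with the packed
  // {num_rows, num_cols, local scratchpad address}; otherwise the pipeline's
  // config-command operands pass through unchanged.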
wire [15:0] och_it = req_trans_weight_0132 ? 16'h10 : max_chs_per_mvin; // @[LoopConv.scala:437:16, :446:8, :483:20, :551:23]
wire [15:0] kch_it = req_trans_weight_0132 ? max_chs_per_mvin : 16'h10; // @[LoopConv.scala:437:16, :446:8, :483:20, :552:23]
wire [16:0] _next_kch_max_T = _GEN_13 - 17'h1; // @[Util.scala:39:28]
wire [15:0] next_kch_max = _next_kch_max_T[15:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_19 = _GEN_14 + {1'h0, kch_it}; // @[Util.scala:41:15]
wire [16:0] _next_kch_T; // @[Util.scala:41:15]
assign _next_kch_T = _GEN_19; // @[Util.scala:41:15]
wire [16:0] _next_kch_T_3; // @[Util.scala:43:11]
assign _next_kch_T_3 = _GEN_19; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_kch_T_1 = _next_kch_T[15:0]; // @[Util.scala:41:15]
wire _next_kch_T_4 = _next_kch_T_3 > {1'h0, next_kch_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_kch_T_5 = _next_kch_T_4 ? 16'h0 : _next_kch_T_1; // @[Mux.scala:126:16]
wire [15:0] next_kch = _next_kch_T_5; // @[Mux.scala:126:16]
wire _GEN_20 = next_kch == 16'h0; // @[Mux.scala:126:16]
wire _next_kcol_T; // @[LoopConv.scala:555:59]
assign _next_kcol_T = _GEN_20; // @[LoopConv.scala:555:59]
wire _next_krow_T_1; // @[LoopConv.scala:556:80]
assign _next_krow_T_1 = _GEN_20; // @[LoopConv.scala:555:59, :556:80]
wire _next_och_T_3; // @[LoopConv.scala:557:101]
assign _next_och_T_3 = _GEN_20; // @[LoopConv.scala:555:59, :557:101]
wire _state_T_5; // @[LoopConv.scala:564:91]
assign _state_T_5 = _GEN_20; // @[LoopConv.scala:555:59, :564:91]
wire [16:0] _next_kcol_max_T = {1'h0, req_inner_bounds_kcols} - 17'h1; // @[Util.scala:39:28]
wire [15:0] next_kcol_max = _next_kcol_max_T[15:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_21 = {1'h0, kcol} + 17'h1; // @[Util.scala:41:15]
wire [16:0] _next_kcol_T_1; // @[Util.scala:41:15]
assign _next_kcol_T_1 = _GEN_21; // @[Util.scala:41:15]
wire [16:0] _next_kcol_T_4; // @[Util.scala:43:11]
assign _next_kcol_T_4 = _GEN_21; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_kcol_T_2 = _next_kcol_T_1[15:0]; // @[Util.scala:41:15]
wire _next_kcol_T_3 = ~_next_kcol_T; // @[Util.scala:42:8]
wire _next_kcol_T_5 = _next_kcol_T_4 > {1'h0, next_kcol_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_kcol_T_6 = _next_kcol_T_5 ? 16'h0 : _next_kcol_T_2; // @[Mux.scala:126:16]
wire [15:0] next_kcol = _next_kcol_T_3 ? kcol : _next_kcol_T_6; // @[Mux.scala:126:16]
wire _GEN_22 = next_kcol == 16'h0; // @[Mux.scala:126:16]
wire _next_krow_T; // @[LoopConv.scala:556:60]
assign _next_krow_T = _GEN_22; // @[LoopConv.scala:556:60]
wire _next_och_T_1; // @[LoopConv.scala:557:81]
assign _next_och_T_1 = _GEN_22; // @[LoopConv.scala:556:60, :557:81]
wire _state_T_3; // @[LoopConv.scala:564:71]
assign _state_T_3 = _GEN_22; // @[LoopConv.scala:556:60, :564:71]
wire _next_krow_T_2 = _next_krow_T & _next_krow_T_1; // @[LoopConv.scala:556:{60,68,80}]
wire [16:0] _next_krow_max_T = {1'h0, req_inner_bounds_krows} - 17'h1; // @[Util.scala:39:28]
wire [15:0] next_krow_max = _next_krow_max_T[15:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_23 = {1'h0, krow} + 17'h1; // @[Util.scala:41:15]
wire [16:0] _next_krow_T_3; // @[Util.scala:41:15]
assign _next_krow_T_3 = _GEN_23; // @[Util.scala:41:15]
wire [16:0] _next_krow_T_6; // @[Util.scala:43:11]
assign _next_krow_T_6 = _GEN_23; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_krow_T_4 = _next_krow_T_3[15:0]; // @[Util.scala:41:15]
wire _next_krow_T_5 = ~_next_krow_T_2; // @[Util.scala:42:8]
wire _next_krow_T_7 = _next_krow_T_6 > {1'h0, next_krow_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_krow_T_8 = _next_krow_T_7 ? 16'h0 : _next_krow_T_4; // @[Mux.scala:126:16]
wire [15:0] next_krow = _next_krow_T_5 ? krow : _next_krow_T_8; // @[Mux.scala:126:16]
wire _GEN_24 = next_krow == 16'h0; // @[Mux.scala:126:16]
wire _next_och_T; // @[LoopConv.scala:557:60]
assign _next_och_T = _GEN_24; // @[LoopConv.scala:557:60]
wire _state_T_1; // @[LoopConv.scala:564:50]
assign _state_T_1 = _GEN_24; // @[LoopConv.scala:557:60, :564:50]
wire _next_och_T_2 = _next_och_T & _next_och_T_1; // @[LoopConv.scala:557:{60,68,81}]
wire _next_och_T_4 = _next_och_T_2 & _next_och_T_3; // @[LoopConv.scala:557:{68,89,101}]
wire [16:0] _next_och_max_T = _GEN_16 - 17'h1; // @[Util.scala:39:28]
wire [15:0] next_och_max = _next_och_max_T[15:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_25 = _GEN_17 + {1'h0, och_it}; // @[Util.scala:41:15]
wire [16:0] _next_och_T_5; // @[Util.scala:41:15]
assign _next_och_T_5 = _GEN_25; // @[Util.scala:41:15]
wire [16:0] _next_och_T_8; // @[Util.scala:43:11]
assign _next_och_T_8 = _GEN_25; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_och_T_6 = _next_och_T_5[15:0]; // @[Util.scala:41:15]
wire _next_och_T_7 = ~_next_och_T_4; // @[Util.scala:42:8]
wire _next_och_T_9 = _next_och_T_8 > {1'h0, next_och_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_och_T_10 = _next_och_T_9 ? 16'h0 : _next_och_T_6; // @[Mux.scala:126:16]
wire [15:0] next_och = _next_och_T_7 ? och : _next_och_T_10; // @[Mux.scala:126:16]
wire _state_T = next_och == 16'h0; // @[Mux.scala:126:16]
wire _state_T_2 = _state_T & _state_T_1; // @[LoopConv.scala:564:{29,37,50}]
wire _state_T_4 = _state_T_2 & _state_T_3; // @[LoopConv.scala:564:{37,58,71}]
wire _state_T_6 = _state_T_4 & _state_T_5; // @[LoopConv.scala:564:{58,79,91}]
wire [1:0] _state_T_7 = {~_state_T_6, 1'h0}; // @[LoopConv.scala:564:{19,79}]
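  // The wrap detections chained above form the nested weight-load iterators: kch
  // (stepped by kch_it) is innermost, then kcol, krow, and och (stepped by och_it).
  // _state_T_7 drops the FSM back to idle only once every iterator has wrapped to zero.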
wire _T_2 = _command_p_io_in_ready & _command_p_io_in_valid_T_4; // @[Decoupled.scala:51:35]
wire _T_4 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[LoopConv.scala:414:7]
if (reset) // @[LoopConv.scala:414:7]
state <= 2'h0; // @[LoopConv.scala:435:22]
else if (_T_4) // @[Decoupled.scala:51:35]
state <= 2'h1; // @[LoopConv.scala:435:22, :524:41]
else if (|req_dram_addr) begin // @[LoopConv.scala:437:16, :523:87]
if (_T_2) // @[Decoupled.scala:51:35]
state <= _command_p_io_in_bits_cmd_T ? 2'h2 : _state_T_7; // @[LoopConv.scala:435:22, :524:41, :548:29, :549:13, :564:{13,19}]
end
else // @[LoopConv.scala:523:87]
state <= 2'h0; // @[LoopConv.scala:435:22]
if (_T_4) begin // @[Decoupled.scala:51:35]
req_outer_bounds_batch_size <= io_req_bits_outer_bounds_batch_size_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_in_row_dim <= io_req_bits_outer_bounds_in_row_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_in_col_dim <= io_req_bits_outer_bounds_in_col_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_in_channels <= io_req_bits_outer_bounds_in_channels_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_out_channels <= io_req_bits_outer_bounds_out_channels_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_out_col_dim <= io_req_bits_outer_bounds_out_col_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_out_row_dim <= io_req_bits_outer_bounds_out_row_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_out_stride <= io_req_bits_outer_bounds_out_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_in_stride <= io_req_bits_outer_bounds_in_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_weight_stride <= io_req_bits_outer_bounds_weight_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_pool_out_row_dim <= io_req_bits_outer_bounds_pool_out_row_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_pool_out_col_dim <= io_req_bits_outer_bounds_pool_out_col_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_stride <= io_req_bits_outer_bounds_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_padding <= io_req_bits_outer_bounds_padding_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_kernel_dim <= io_req_bits_outer_bounds_kernel_dim_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_kernel_dilation <= io_req_bits_outer_bounds_kernel_dilation_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_pool_size <= io_req_bits_outer_bounds_pool_size_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_pool_stride <= io_req_bits_outer_bounds_pool_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_outer_bounds_pool_padding <= io_req_bits_outer_bounds_pool_padding_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_batches <= io_req_bits_inner_bounds_batches_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_porows <= io_req_bits_inner_bounds_porows_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_pocols <= io_req_bits_inner_bounds_pocols_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_pochs <= io_req_bits_inner_bounds_pochs_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_krows <= io_req_bits_inner_bounds_krows_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_kcols <= io_req_bits_inner_bounds_kcols_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_kchs <= io_req_bits_inner_bounds_kchs_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_lpad <= io_req_bits_inner_bounds_lpad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_rpad <= io_req_bits_inner_bounds_rpad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_upad <= io_req_bits_inner_bounds_upad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_dpad <= io_req_bits_inner_bounds_dpad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_plpad <= io_req_bits_inner_bounds_plpad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_prad <= io_req_bits_inner_bounds_prad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_pupad <= io_req_bits_inner_bounds_pupad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_pdpad <= io_req_bits_inner_bounds_pdpad_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_orows <= io_req_bits_inner_bounds_orows_0; // @[LoopConv.scala:414:7, :437:16]
req_inner_bounds_ocols <= io_req_bits_inner_bounds_ocols_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_ochs <= io_req_bits_derived_params_ochs_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_irows <= io_req_bits_derived_params_irows_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_icols <= io_req_bits_derived_params_icols_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_irows_unpadded <= io_req_bits_derived_params_irows_unpadded_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_icols_unpadded <= io_req_bits_derived_params_icols_unpadded_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_ichs <= io_req_bits_derived_params_ichs_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_out_channels_per_bank <= io_req_bits_derived_params_out_channels_per_bank_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_in_channels_per_bank <= io_req_bits_derived_params_in_channels_per_bank_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_bias_spad_stride <= io_req_bits_derived_params_bias_spad_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_input_spad_stride <= io_req_bits_derived_params_input_spad_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_derived_params_weight_spad_stride <= io_req_bits_derived_params_weight_spad_stride_0; // @[LoopConv.scala:414:7, :437:16]
req_addr_end <= io_req_bits_addr_end_0; // @[LoopConv.scala:414:7, :437:16]
req_dram_addr <= io_req_bits_dram_addr_0; // @[LoopConv.scala:414:7, :437:16]
req_trans_weight_1203 <= io_req_bits_trans_weight_1203_0; // @[LoopConv.scala:414:7, :437:16]
req_trans_weight_0132 <= io_req_bits_trans_weight_0132_0; // @[LoopConv.scala:414:7, :437:16]
req_dw <= io_req_bits_dw_0; // @[LoopConv.scala:414:7, :437:16]
req_loop_id <= io_req_bits_loop_id_0; // @[LoopConv.scala:414:7, :437:16]
och <= 16'h0; // @[LoopConv.scala:460:16]
krow <= 16'h0; // @[LoopConv.scala:461:17]
kcol <= 16'h0; // @[LoopConv.scala:462:17]
kch <= 16'h0; // @[LoopConv.scala:463:16]
end
else if (~(|req_dram_addr) | ~_T_2 | _command_p_io_in_bits_cmd_T) begin // @[Decoupled.scala:51:35]
end
else begin // @[LoopConv.scala:463:16, :545:30, :547:36, :548:29]
och <= next_och; // @[Mux.scala:126:16]
krow <= next_krow; // @[Mux.scala:126:16]
kcol <= next_kcol; // @[Mux.scala:126:16]
kch <= next_kch; // @[Mux.scala:126:16]
end
  end // always @(posedge)
Pipeline_12 command_p ( // @[LoopConv.scala:493:25]
.clock (clock),
.reset (reset),
.io_in_ready (_command_p_io_in_ready),
.io_in_valid (_command_p_io_in_valid_T_4), // @[LoopConv.scala:523:69]
.io_in_bits_cmd_inst_funct (_command_p_io_in_bits_cmd_T_1_inst_funct), // @[LoopConv.scala:524:34]
.io_in_bits_cmd_rs1 (_command_p_io_in_bits_cmd_T_1_rs1), // @[LoopConv.scala:524:34]
.io_in_bits_cmd_rs2 (_command_p_io_in_bits_cmd_T_1_rs2), // @[LoopConv.scala:524:34]
.io_in_bits_dram_addr (dram_addr), // @[LoopConv.scala:471:33]
.io_in_bits_spad_addr (spad_addr), // @[LoopConv.scala:473:22]
.io_in_bits_K (K), // @[LoopConv.scala:482:14]
.io_in_bits_J (J), // @[LoopConv.scala:479:14]
.io_out_ready (_command_p_io_out_ready_T_1), // @[LoopConv.scala:530:42]
.io_out_valid (_command_p_io_out_valid),
.io_out_bits_cmd_inst_funct (_command_p_io_out_bits_cmd_inst_funct),
.io_out_bits_cmd_inst_rs2 (io_cmd_bits_inst_rs2_0),
.io_out_bits_cmd_inst_rs1 (io_cmd_bits_inst_rs1_0),
.io_out_bits_cmd_inst_xd (io_cmd_bits_inst_xd_0),
.io_out_bits_cmd_inst_xs1 (io_cmd_bits_inst_xs1_0),
.io_out_bits_cmd_inst_xs2 (io_cmd_bits_inst_xs2_0),
.io_out_bits_cmd_inst_rd (io_cmd_bits_inst_rd_0),
.io_out_bits_cmd_inst_opcode (io_cmd_bits_inst_opcode_0),
.io_out_bits_cmd_rs1 (_command_p_io_out_bits_cmd_rs1),
.io_out_bits_cmd_rs2 (_command_p_io_out_bits_cmd_rs2),
.io_out_bits_cmd_status_debug (io_cmd_bits_status_debug_0),
.io_out_bits_cmd_status_cease (io_cmd_bits_status_cease_0),
.io_out_bits_cmd_status_wfi (io_cmd_bits_status_wfi_0),
.io_out_bits_cmd_status_isa (io_cmd_bits_status_isa_0),
.io_out_bits_cmd_status_dprv (io_cmd_bits_status_dprv_0),
.io_out_bits_cmd_status_dv (io_cmd_bits_status_dv_0),
.io_out_bits_cmd_status_prv (io_cmd_bits_status_prv_0),
.io_out_bits_cmd_status_v (io_cmd_bits_status_v_0),
.io_out_bits_cmd_status_sd (io_cmd_bits_status_sd_0),
.io_out_bits_cmd_status_zero2 (io_cmd_bits_status_zero2_0),
.io_out_bits_cmd_status_mpv (io_cmd_bits_status_mpv_0),
.io_out_bits_cmd_status_gva (io_cmd_bits_status_gva_0),
.io_out_bits_cmd_status_mbe (io_cmd_bits_status_mbe_0),
.io_out_bits_cmd_status_sbe (io_cmd_bits_status_sbe_0),
.io_out_bits_cmd_status_sxl (io_cmd_bits_status_sxl_0),
.io_out_bits_cmd_status_uxl (io_cmd_bits_status_uxl_0),
.io_out_bits_cmd_status_sd_rv32 (io_cmd_bits_status_sd_rv32_0),
.io_out_bits_cmd_status_zero1 (io_cmd_bits_status_zero1_0),
.io_out_bits_cmd_status_tsr (io_cmd_bits_status_tsr_0),
.io_out_bits_cmd_status_tw (io_cmd_bits_status_tw_0),
.io_out_bits_cmd_status_tvm (io_cmd_bits_status_tvm_0),
.io_out_bits_cmd_status_mxr (io_cmd_bits_status_mxr_0),
.io_out_bits_cmd_status_sum (io_cmd_bits_status_sum_0),
.io_out_bits_cmd_status_mprv (io_cmd_bits_status_mprv_0),
.io_out_bits_cmd_status_xs (io_cmd_bits_status_xs_0),
.io_out_bits_cmd_status_fs (io_cmd_bits_status_fs_0),
.io_out_bits_cmd_status_mpp (io_cmd_bits_status_mpp_0),
.io_out_bits_cmd_status_vs (io_cmd_bits_status_vs_0),
.io_out_bits_cmd_status_spp (io_cmd_bits_status_spp_0),
.io_out_bits_cmd_status_mpie (io_cmd_bits_status_mpie_0),
.io_out_bits_cmd_status_ube (io_cmd_bits_status_ube_0),
.io_out_bits_cmd_status_spie (io_cmd_bits_status_spie_0),
.io_out_bits_cmd_status_upie (io_cmd_bits_status_upie_0),
.io_out_bits_cmd_status_mie (io_cmd_bits_status_mie_0),
.io_out_bits_cmd_status_hie (io_cmd_bits_status_hie_0),
.io_out_bits_cmd_status_sie (io_cmd_bits_status_sie_0),
.io_out_bits_cmd_status_uie (io_cmd_bits_status_uie_0),
.io_out_bits_dram_addr (_command_p_io_out_bits_dram_addr),
.io_out_bits_spad_addr (_command_p_io_out_bits_spad_addr),
.io_out_bits_K (_command_p_io_out_bits_K),
.io_out_bits_J (_command_p_io_out_bits_J),
.io_busy (_command_p_io_busy)
); // @[LoopConv.scala:493:25]
assign io_cmd_bits_inst_funct_0 = _command_p_io_out_bits_cmd_inst_funct; // @[LoopConv.scala:414:7, :493:25]
assign io_req_ready = io_req_ready_0; // @[LoopConv.scala:414:7]
assign io_cmd_valid = io_cmd_valid_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_funct = io_cmd_bits_inst_funct_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_rs2 = io_cmd_bits_inst_rs2_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_rs1 = io_cmd_bits_inst_rs1_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_xd = io_cmd_bits_inst_xd_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_xs1 = io_cmd_bits_inst_xs1_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_xs2 = io_cmd_bits_inst_xs2_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_rd = io_cmd_bits_inst_rd_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_inst_opcode = io_cmd_bits_inst_opcode_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_rs1 = io_cmd_bits_rs1_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_rs2 = io_cmd_bits_rs2_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_debug = io_cmd_bits_status_debug_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_cease = io_cmd_bits_status_cease_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_wfi = io_cmd_bits_status_wfi_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_isa = io_cmd_bits_status_isa_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_dprv = io_cmd_bits_status_dprv_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_dv = io_cmd_bits_status_dv_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_prv = io_cmd_bits_status_prv_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_v = io_cmd_bits_status_v_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_sd = io_cmd_bits_status_sd_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_zero2 = io_cmd_bits_status_zero2_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mpv = io_cmd_bits_status_mpv_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_gva = io_cmd_bits_status_gva_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mbe = io_cmd_bits_status_mbe_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_sbe = io_cmd_bits_status_sbe_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_sxl = io_cmd_bits_status_sxl_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_uxl = io_cmd_bits_status_uxl_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_sd_rv32 = io_cmd_bits_status_sd_rv32_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_zero1 = io_cmd_bits_status_zero1_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_tsr = io_cmd_bits_status_tsr_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_tw = io_cmd_bits_status_tw_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_tvm = io_cmd_bits_status_tvm_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mxr = io_cmd_bits_status_mxr_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_sum = io_cmd_bits_status_sum_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mprv = io_cmd_bits_status_mprv_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_xs = io_cmd_bits_status_xs_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_fs = io_cmd_bits_status_fs_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mpp = io_cmd_bits_status_mpp_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_vs = io_cmd_bits_status_vs_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_spp = io_cmd_bits_status_spp_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mpie = io_cmd_bits_status_mpie_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_ube = io_cmd_bits_status_ube_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_spie = io_cmd_bits_status_spie_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_upie = io_cmd_bits_status_upie_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_mie = io_cmd_bits_status_mie_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_hie = io_cmd_bits_status_hie_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_sie = io_cmd_bits_status_sie_0; // @[LoopConv.scala:414:7]
assign io_cmd_bits_status_uie = io_cmd_bits_status_uie_0; // @[LoopConv.scala:414:7]
assign io_idle = io_idle_0; // @[LoopConv.scala:414:7]
assign io_loop_id = io_loop_id_0; // @[LoopConv.scala:414:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
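        // The increment path ORs 'roundMask' into the significand before adding 1 so the
        // carry ripples into the lowest retained bit; for round-to-nearest-even on an
        // exact tie (roundPosBit set, no extra discarded bits), the retained LSB is then
        // cleared to force an even result. The non-increment path truncates, with
        // round-to-odd jamming the retained LSB whenever nonzero bits are being discarded.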
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
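
// Illustrative only (editor's sketch): a minimal caller for RoundRawFNToRecFN with assumed
// IEEE single-precision parameters (expWidth = 8, sigWidth = 24). The class and port names
// below are hypothetical; this module is not part of the original hardfloat file and is not
// reflected in the generated Verilog that follows.
class RoundRawFNToRecFNExample extends RawModule
{
    val io = IO(new Bundle {
        val in = Input(new RawFloat(8, 26))   // the rounder expects sigWidth + 2 significand bits
        val roundingMode = Input(UInt(3.W))
        val out = Output(Bits(33.W))          // expWidth + sigWidth + 1 recoded bits
        val exceptionFlags = Output(Bits(5.W))
    })
    val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
    rounder.io.invalidExc := false.B          // assume invalid/infinite cases were screened earlier
    rounder.io.infiniteExc := false.B
    rounder.io.in := io.in
    rounder.io.roundingMode := io.roundingMode
    rounder.io.detectTininess := tininess_afterRounding
    io.out := rounder.io.out
    io.exceptionFlags := rounder.io.exceptionFlags
}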
| module RoundRawFNToRecFN_e11_s53( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_infiniteExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [12:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [55:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [64:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
RoundAnyRawFNToRecFN_ie11_is55_oe11_os53 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc),
.io_infiniteExc (io_infiniteExc),
.io_in_isNaN (io_in_isNaN),
.io_in_isInf (io_in_isInf),
.io_in_isZero (io_in_isZero),
.io_in_sign (io_in_sign),
.io_in_sExp (io_in_sExp),
.io_in_sig (io_in_sig),
.io_roundingMode (io_roundingMode),
.io_out (io_out),
.io_exceptionFlags (io_exceptionFlags)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie11_is55_oe11_os53( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_infiniteExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [12:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [55:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [64:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire roundingMode_near_even = io_roundingMode == 3'h0; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire roundingMode_odd = io_roundingMode == 3'h6; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire roundMagUp = io_roundingMode == 3'h2 & io_in_sign | io_roundingMode == 3'h3 & ~io_in_sign; // @[RoundAnyRawFNToRecFN.scala:92:53, :93:53, :98:{27,42,63,66}]
wire [11:0] _roundMask_T_1 = ~(io_in_sExp[11:0]); // @[primitives.scala:52:21]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> _roundMask_T_1[5:0]); // @[primitives.scala:52:21, :59:26, :76:56]
wire [18:0] _GEN = {roundMask_shift[18:17], roundMask_shift[20:19], roundMask_shift[22:21], roundMask_shift[24:23], roundMask_shift[26:25], roundMask_shift[28:27], roundMask_shift[30:29], roundMask_shift[32:31], roundMask_shift[34:33], roundMask_shift[36]} & 19'h55555; // @[primitives.scala:76:56, :77:20, :78:22]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> _roundMask_T_1[5:0]); // @[primitives.scala:52:21, :59:26, :76:56]
wire [53:0] _roundMask_T_128 = _roundMask_T_1[11] ? (_roundMask_T_1[10] ? {~(_roundMask_T_1[9] | _roundMask_T_1[8] | _roundMask_T_1[7] | _roundMask_T_1[6] ? 51'h0 : ~{roundMask_shift[13], roundMask_shift[14], roundMask_shift[15], roundMask_shift[16], roundMask_shift[17], _GEN[18:15] | {roundMask_shift[20:19], roundMask_shift[22:21]} & 4'h5, roundMask_shift[22], _GEN[13] | roundMask_shift[23], roundMask_shift[24], roundMask_shift[25], _GEN[10:7] | {roundMask_shift[28:27], roundMask_shift[30:29]} & 4'h5, roundMask_shift[30], _GEN[5] | roundMask_shift[31], roundMask_shift[32], roundMask_shift[33], {_GEN[2:0], 1'h0} | {roundMask_shift[36:35], roundMask_shift[38:37]} & 4'h5, roundMask_shift[38], roundMask_shift[39], roundMask_shift[40], roundMask_shift[41], roundMask_shift[42], roundMask_shift[43], roundMask_shift[44], roundMask_shift[45], roundMask_shift[46], roundMask_shift[47], roundMask_shift[48], roundMask_shift[49], roundMask_shift[50], roundMask_shift[51], roundMask_shift[52], roundMask_shift[53], roundMask_shift[54], roundMask_shift[55], roundMask_shift[56], roundMask_shift[57], roundMask_shift[58], roundMask_shift[59], roundMask_shift[60], roundMask_shift[61], roundMask_shift[62], roundMask_shift[63]}), 3'h7} : {51'h0, _roundMask_T_1[9] & _roundMask_T_1[8] & _roundMask_T_1[7] & _roundMask_T_1[6] ? {roundMask_shift_1[0], roundMask_shift_1[1], roundMask_shift_1[2]} : 3'h0}) : 54'h0; // @[primitives.scala:52:21, :58:25, :59:26, :62:24, :67:24, :68:58, :73:{17,21,32}, :76:56, :77:20, :78:22]
wire _common_underflow_T_4 = _roundMask_T_128[0] | io_in_sig[55]; // @[primitives.scala:62:24]
wire [54:0] _GEN_0 = {1'h1, ~(_roundMask_T_128[53:1]), ~_common_underflow_T_4}; // @[primitives.scala:62:24]
wire [54:0] _GEN_1 = {_roundMask_T_128[53:1], _common_underflow_T_4, 1'h1}; // @[primitives.scala:62:24]
wire [54:0] _roundPosBit_T = io_in_sig[55:1] & _GEN_0 & _GEN_1; // @[RoundAnyRawFNToRecFN.scala:58:16, :159:42, :162:53, :163:46, :164:40]
wire [54:0] _anyRoundExtra_T = io_in_sig[54:0] & _GEN_1; // @[RoundAnyRawFNToRecFN.scala:58:16, :159:42, :165:42]
wire [109:0] _GEN_2 = {_roundPosBit_T, _anyRoundExtra_T}; // @[RoundAnyRawFNToRecFN.scala:163:46, :164:{40,56}, :165:{42,62}, :166:36]
wire _overflow_roundMagUp_T = roundingMode_near_even | io_roundingMode == 3'h4; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38]
wire [54:0] roundedSig = _overflow_roundMagUp_T & (|_roundPosBit_T) | roundMagUp & (|_GEN_2) ? {1'h0, io_in_sig[55:2] | {_roundMask_T_128[53:1], _common_underflow_T_4}} + 55'h1 & ~(roundingMode_near_even & (|_roundPosBit_T) & _anyRoundExtra_T == 55'h0 ? {_roundMask_T_128[53:1], _common_underflow_T_4, 1'h1} : 55'h0) : {1'h0, io_in_sig[55:2] & {~(_roundMask_T_128[53:1]), ~_common_underflow_T_4}} | (roundingMode_odd & (|_GEN_2) ? _GEN_0 & _GEN_1 : 55'h0); // @[primitives.scala:62:24]
wire [13:0] sRoundedExp = {io_in_sExp[12], io_in_sExp} + {12'h0, roundedSig[54:53]}; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:{40,54}]
wire common_totalUnderflow = $signed(sRoundedExp) < 14'sh3CE; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
wire isNaNOut = io_invalidExc | io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:235:34]
wire notNaN_isSpecialInfOut = io_infiniteExc | io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:236:49]
wire commonCase = ~isNaNOut & ~notNaN_isSpecialInfOut & ~io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:235:34, :236:49, :237:{22,33,36,61,64}]
wire overflow = commonCase & $signed(sRoundedExp[13:10]) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:{30,50}, :237:{33,61}, :238:32]
wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :169:38, :243:60]
wire pegMinNonzeroMagOut = commonCase & common_totalUnderflow & (roundMagUp | roundingMode_odd); // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :200:31, :237:{33,61}, :245:{20,45,60}]
wire pegMaxFiniteMagOut = overflow & ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:238:32, :243:60, :246:{39,42}]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | overflow & overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:236:49, :238:32, :243:60, :248:{32,45}]
assign io_out = {~isNaNOut & io_in_sign, sRoundedExp[11:0] & ~(io_in_isZero | common_totalUnderflow ? 12'hE00 : 12'h0) & ~(pegMinNonzeroMagOut ? 12'hC31 : 12'h0) & {1'h1, ~pegMaxFiniteMagOut, 10'h3FF} & {2'h3, ~notNaN_isInfOut, 9'h1FF} | (pegMinNonzeroMagOut ? 12'h3CE : 12'h0) | (pegMaxFiniteMagOut ? 12'hBFF : 12'h0) | (notNaN_isInfOut ? 12'hC00 : 12'h0) | (isNaNOut ? 12'hE00 : 12'h0), (isNaNOut | io_in_isZero | common_totalUnderflow ? {isNaNOut, 51'h0} : io_in_sig[55] ? roundedSig[52:1] : roundedSig[51:0]) | {52{pegMaxFiniteMagOut}}}; // @[RoundAnyRawFNToRecFN.scala:48:5, :120:57, :173:16, :185:40, :187:37, :189:16, :190:27, :191:27, :200:31, :235:34, :245:{20,45}, :246:39, :248:32, :250:22, :252:24, :253:{14,18,32}, :256:17, :257:{14,18}, :260:17, :261:{14,18}, :264:17, :265:{14,18}, :268:18, :269:16, :272:15, :273:16, :276:15, :277:{16,73}, :278:16, :280:{12,22,38}, :281:16, :283:11, :284:13, :286:33]
assign io_exceptionFlags = {io_invalidExc, io_infiniteExc, overflow, commonCase & (common_totalUnderflow | (|_GEN_2) & io_in_sExp[12:11] != 2'h1 & (io_in_sig[55] ? _roundMask_T_128[1] : _common_underflow_T_4) & ~(~(io_in_sig[55] ? _roundMask_T_128[2] : _roundMask_T_128[1]) & (io_in_sig[55] ? roundedSig[54] : roundedSig[53]) & (|_roundPosBit_T) & (_overflow_roundMagUp_T & (io_in_sig[55] ? io_in_sig[2] : io_in_sig[1]) | roundMagUp & (|{io_in_sig[55] & io_in_sig[2], io_in_sig[1:0]})))), overflow | commonCase & (|{common_totalUnderflow, _roundPosBit_T, _anyRoundExtra_T})}; // @[primitives.scala:62:24]
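  // exceptionFlags packs {invalid, infinite, overflow, underflow, inexact}, matching
  // io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact in the Chisel source.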
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
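    // Illustrative note (not part of the original hardfloat source): these constants are
    // thresholds in the recoded-float exponent space. Assuming outExpWidth = 8 and
    // outSigWidth = 24 (the single-precision configuration used elsewhere in this
    // document), they evaluate to outNaNExp = 7 << 6 = 0x1C0, outInfExp = 0x180,
    // outMaxFiniteExp = 0x17F, outMinNormExp = (1 << 7) + 2 = 0x082, and
    // outMinNonzeroExp = 0x082 - 24 + 1 = 0x06B. For a double-precision instance
    // (outExpWidth = 11, outSigWidth = 53), outMinNonzeroExp = (1 << 10) + 2 - 53 + 1 =
    // 0x3CE, which is the 14'sh3CE threshold compared against in the generated
    // common_totalUnderflow logic of the Verilog above.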
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundRawFNToRecFN_e8_s24_126( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_126 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
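// Illustrative sketch (not part of the original file): a typical LazyModule supplies its
// phase-two hardware by instantiating a LazyModuleImp, as in the hypothetical example
// below; the same pattern appears in TLSerdesser later in this document.
//
// class MyWidget(implicit p: Parameters) extends LazyModule {
//   val node = TLIdentityNode() // diplomatic parameters are negotiated in phase one
//   lazy val module = new LazyModuleImp(this) {
//     // ordinary Chisel, elaborated lazily once negotiation is complete
//   }
// }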
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
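// Illustrative sketch (not part of the original file): a concrete subclass would normally
// re-drive these defaults from its own ports, e.g. (with hypothetical IO names)
//   childClock := io.clock_in // a Clock input defined by the subclass
//   childReset := io.reset_in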
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File TLSerdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
object TLSerdesser {
// This should be the standard bundle type for TLSerdesser
val STANDARD_TLBUNDLE_PARAMS = TLBundleParameters(
addressBits=64, dataBits=64,
sourceBits=8, sinkBits=8, sizeBits=8,
echoFields=Nil, requestFields=Nil, responseFields=Nil,
hasBCE=true)
}
class SerdesDebugIO extends Bundle {
val ser_busy = Output(Bool())
val des_busy = Output(Bool())
}
class TLSerdesser(
val flitWidth: Int,
clientPortParams: Option[TLMasterPortParameters],
managerPortParams: Option[TLSlavePortParameters],
val bundleParams: TLBundleParameters = TLSerdesser.STANDARD_TLBUNDLE_PARAMS,
nameSuffix: Option[String] = None
)
(implicit p: Parameters) extends LazyModule {
require (clientPortParams.isDefined || managerPortParams.isDefined)
val clientNode = clientPortParams.map { c => TLClientNode(Seq(c)) }
val managerNode = managerPortParams.map { m => TLManagerNode(Seq(m)) }
override lazy val desiredName = (Seq("TLSerdesser") ++ nameSuffix).mkString("_")
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val ser = Vec(5, new DecoupledFlitIO(flitWidth))
val debug = new SerdesDebugIO
})
val client_tl = clientNode.map(_.out(0)._1).getOrElse(0.U.asTypeOf(new TLBundle(bundleParams)))
val client_edge = clientNode.map(_.out(0)._2)
val manager_tl = managerNode.map(_.in(0)._1).getOrElse(0.U.asTypeOf(new TLBundle(bundleParams)))
val manager_edge = managerNode.map(_.in(0)._2)
val clientParams = client_edge.map(_.bundle).getOrElse(bundleParams)
val managerParams = manager_edge.map(_.bundle).getOrElse(bundleParams)
val mergedParams = clientParams.union(managerParams).union(bundleParams)
require(mergedParams.echoFields.isEmpty, "TLSerdesser does not support TileLink with echo fields")
require(mergedParams.requestFields.isEmpty, "TLSerdesser does not support TileLink with request fields")
require(mergedParams.responseFields.isEmpty, "TLSerdesser does not support TileLink with response fields")
require(mergedParams == bundleParams, s"TLSerdesser is misconfigured, the combined inwards/outwards parameters cannot be serialized using the provided bundle params\n$mergedParams > $bundleParams")
val out_channels = Seq(
(manager_tl.e, manager_edge.map(e => Module(new TLEToBeat(e, mergedParams, nameSuffix)))),
(client_tl.d, client_edge.map (e => Module(new TLDToBeat(e, mergedParams, nameSuffix)))),
(manager_tl.c, manager_edge.map(e => Module(new TLCToBeat(e, mergedParams, nameSuffix)))),
(client_tl.b, client_edge.map (e => Module(new TLBToBeat(e, mergedParams, nameSuffix)))),
(manager_tl.a, manager_edge.map(e => Module(new TLAToBeat(e, mergedParams, nameSuffix))))
)
io.ser.map(_.out.valid := false.B)
io.ser.map(_.out.bits := DontCare)
val out_sers = out_channels.zipWithIndex.map { case ((c,b),i) => b.map { b =>
b.io.protocol <> c
val ser = Module(new GenericSerializer(b.io.beat.bits.cloneType, flitWidth)).suggestName(s"ser_$i")
ser.io.in <> b.io.beat
io.ser(i).out <> ser.io.out
ser
}}.flatten
io.debug.ser_busy := out_sers.map(_.io.busy).orR
val in_channels = Seq(
(client_tl.e, Module(new TLEFromBeat(mergedParams, nameSuffix))),
(manager_tl.d, Module(new TLDFromBeat(mergedParams, nameSuffix))),
(client_tl.c, Module(new TLCFromBeat(mergedParams, nameSuffix))),
(manager_tl.b, Module(new TLBFromBeat(mergedParams, nameSuffix))),
(client_tl.a, Module(new TLAFromBeat(mergedParams, nameSuffix)))
)
val in_desers = in_channels.zipWithIndex.map { case ((c,b),i) =>
c <> b.io.protocol
val des = Module(new GenericDeserializer(b.io.beat.bits.cloneType, flitWidth)).suggestName(s"des_$i")
des.io.in <> io.ser(i).in
b.io.beat <> des.io.out
des
}
io.debug.des_busy := in_desers.map(_.io.busy).orR
}
}
| module TLSerdesser_SerialRAM( // @[TLSerdes.scala:39:9]
input clock, // @[TLSerdes.scala:39:9]
input reset, // @[TLSerdes.scala:39:9]
output auto_manager_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_manager_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_manager_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_manager_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_manager_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_manager_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_manager_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_manager_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_manager_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_manager_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_manager_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_manager_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_manager_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_manager_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_manager_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_manager_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_manager_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_manager_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_manager_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_manager_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output io_ser_0_in_ready, // @[TLSerdes.scala:40:16]
input io_ser_0_in_valid, // @[TLSerdes.scala:40:16]
input [31:0] io_ser_0_in_bits_flit, // @[TLSerdes.scala:40:16]
output [31:0] io_ser_0_out_bits_flit, // @[TLSerdes.scala:40:16]
output io_ser_1_in_ready, // @[TLSerdes.scala:40:16]
input io_ser_1_in_valid, // @[TLSerdes.scala:40:16]
input [31:0] io_ser_1_in_bits_flit, // @[TLSerdes.scala:40:16]
output io_ser_2_in_ready, // @[TLSerdes.scala:40:16]
input io_ser_2_in_valid, // @[TLSerdes.scala:40:16]
input [31:0] io_ser_2_in_bits_flit, // @[TLSerdes.scala:40:16]
input io_ser_2_out_ready, // @[TLSerdes.scala:40:16]
output io_ser_2_out_valid, // @[TLSerdes.scala:40:16]
output [31:0] io_ser_2_out_bits_flit, // @[TLSerdes.scala:40:16]
output io_ser_3_in_ready, // @[TLSerdes.scala:40:16]
input io_ser_3_in_valid, // @[TLSerdes.scala:40:16]
input [31:0] io_ser_3_in_bits_flit, // @[TLSerdes.scala:40:16]
output io_ser_4_in_ready, // @[TLSerdes.scala:40:16]
input io_ser_4_in_valid, // @[TLSerdes.scala:40:16]
input [31:0] io_ser_4_in_bits_flit, // @[TLSerdes.scala:40:16]
input io_ser_4_out_ready, // @[TLSerdes.scala:40:16]
output io_ser_4_out_valid, // @[TLSerdes.scala:40:16]
output [31:0] io_ser_4_out_bits_flit // @[TLSerdes.scala:40:16]
);
wire _des_4_io_out_valid; // @[TLSerdes.scala:86:23]
wire _des_4_io_out_bits_head; // @[TLSerdes.scala:86:23]
wire _des_4_io_out_bits_tail; // @[TLSerdes.scala:86:23]
wire _des_3_io_out_valid; // @[TLSerdes.scala:86:23]
wire _des_3_io_out_bits_head; // @[TLSerdes.scala:86:23]
wire _des_3_io_out_bits_tail; // @[TLSerdes.scala:86:23]
wire _des_2_io_out_valid; // @[TLSerdes.scala:86:23]
wire _des_2_io_out_bits_head; // @[TLSerdes.scala:86:23]
wire _des_2_io_out_bits_tail; // @[TLSerdes.scala:86:23]
wire _des_1_io_out_valid; // @[TLSerdes.scala:86:23]
wire [64:0] _des_1_io_out_bits_payload; // @[TLSerdes.scala:86:23]
wire _des_1_io_out_bits_head; // @[TLSerdes.scala:86:23]
wire _des_1_io_out_bits_tail; // @[TLSerdes.scala:86:23]
wire _des_0_io_out_valid; // @[TLSerdes.scala:86:23]
wire _des_0_io_out_bits_head; // @[TLSerdes.scala:86:23]
wire _des_0_io_out_bits_tail; // @[TLSerdes.scala:86:23]
wire _in_channels_4_2_io_beat_ready; // @[TLSerdes.scala:82:28]
wire _in_channels_3_2_io_beat_ready; // @[TLSerdes.scala:81:28]
wire _in_channels_2_2_io_beat_ready; // @[TLSerdes.scala:80:28]
wire _in_channels_1_2_io_protocol_valid; // @[TLSerdes.scala:79:28]
wire [2:0] _in_channels_1_2_io_protocol_bits_opcode; // @[TLSerdes.scala:79:28]
wire [1:0] _in_channels_1_2_io_protocol_bits_param; // @[TLSerdes.scala:79:28]
wire [7:0] _in_channels_1_2_io_protocol_bits_size; // @[TLSerdes.scala:79:28]
wire [7:0] _in_channels_1_2_io_protocol_bits_source; // @[TLSerdes.scala:79:28]
wire [7:0] _in_channels_1_2_io_protocol_bits_sink; // @[TLSerdes.scala:79:28]
wire _in_channels_1_2_io_protocol_bits_denied; // @[TLSerdes.scala:79:28]
wire _in_channels_1_2_io_protocol_bits_corrupt; // @[TLSerdes.scala:79:28]
wire _in_channels_1_2_io_beat_ready; // @[TLSerdes.scala:79:28]
wire _in_channels_0_2_io_beat_ready; // @[TLSerdes.scala:78:28]
wire _ser_4_io_in_ready; // @[TLSerdes.scala:69:23]
wire _out_channels_4_2_io_protocol_ready; // @[TLSerdes.scala:63:50]
wire _out_channels_4_2_io_beat_valid; // @[TLSerdes.scala:63:50]
wire [85:0] _out_channels_4_2_io_beat_bits_payload; // @[TLSerdes.scala:63:50]
wire _out_channels_4_2_io_beat_bits_head; // @[TLSerdes.scala:63:50]
wire _out_channels_4_2_io_beat_bits_tail; // @[TLSerdes.scala:63:50]
wire _out_channels_2_2_io_beat_bits_head; // @[TLSerdes.scala:61:50]
wire _out_channels_0_2_io_beat_bits_head; // @[TLSerdes.scala:59:50]
TLMonitor_105 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_out_channels_4_2_io_protocol_ready), // @[TLSerdes.scala:63:50]
.io_in_a_valid (auto_manager_in_a_valid),
.io_in_a_bits_opcode (auto_manager_in_a_bits_opcode),
.io_in_a_bits_param (auto_manager_in_a_bits_param),
.io_in_a_bits_size (auto_manager_in_a_bits_size),
.io_in_a_bits_source (auto_manager_in_a_bits_source),
.io_in_a_bits_address (auto_manager_in_a_bits_address),
.io_in_a_bits_mask (auto_manager_in_a_bits_mask),
.io_in_a_bits_corrupt (auto_manager_in_a_bits_corrupt),
.io_in_d_ready (auto_manager_in_d_ready),
.io_in_d_valid (_in_channels_1_2_io_protocol_valid), // @[TLSerdes.scala:79:28]
.io_in_d_bits_opcode (_in_channels_1_2_io_protocol_bits_opcode), // @[TLSerdes.scala:79:28]
.io_in_d_bits_param (_in_channels_1_2_io_protocol_bits_param), // @[TLSerdes.scala:79:28]
.io_in_d_bits_size (_in_channels_1_2_io_protocol_bits_size[3:0]), // @[TLSerdes.scala:79:28, :85:9]
.io_in_d_bits_source (_in_channels_1_2_io_protocol_bits_source[0]), // @[TLSerdes.scala:79:28, :85:9]
.io_in_d_bits_sink (_in_channels_1_2_io_protocol_bits_sink[4:0]), // @[TLSerdes.scala:79:28, :85:9]
.io_in_d_bits_denied (_in_channels_1_2_io_protocol_bits_denied), // @[TLSerdes.scala:79:28]
.io_in_d_bits_corrupt (_in_channels_1_2_io_protocol_bits_corrupt) // @[TLSerdes.scala:79:28]
); // @[Nodes.scala:27:25]
TLEToBeat_SerialRAM_a64d64s8k8z8c out_channels_0_2 ( // @[TLSerdes.scala:59:50]
.clock (clock),
.reset (reset),
.io_beat_bits_head (_out_channels_0_2_io_beat_bits_head)
); // @[TLSerdes.scala:59:50]
TLCToBeat_SerialRAM_a64d64s8k8z8c out_channels_2_2 ( // @[TLSerdes.scala:61:50]
.clock (clock),
.reset (reset),
.io_beat_bits_head (_out_channels_2_2_io_beat_bits_head)
); // @[TLSerdes.scala:61:50]
TLAToBeat_SerialRAM_a64d64s8k8z8c out_channels_4_2 ( // @[TLSerdes.scala:63:50]
.clock (clock),
.reset (reset),
.io_protocol_ready (_out_channels_4_2_io_protocol_ready),
.io_protocol_valid (auto_manager_in_a_valid),
.io_protocol_bits_opcode (auto_manager_in_a_bits_opcode),
.io_protocol_bits_param (auto_manager_in_a_bits_param),
.io_protocol_bits_size ({4'h0, auto_manager_in_a_bits_size}), // @[TLSerdes.scala:68:21]
.io_protocol_bits_source ({7'h0, auto_manager_in_a_bits_source}), // @[TLSerdes.scala:68:21]
.io_protocol_bits_address ({32'h0, auto_manager_in_a_bits_address}), // @[TLSerdes.scala:68:21]
.io_protocol_bits_mask (auto_manager_in_a_bits_mask),
.io_protocol_bits_data (auto_manager_in_a_bits_data),
.io_protocol_bits_corrupt (auto_manager_in_a_bits_corrupt),
.io_beat_ready (_ser_4_io_in_ready), // @[TLSerdes.scala:69:23]
.io_beat_valid (_out_channels_4_2_io_beat_valid),
.io_beat_bits_payload (_out_channels_4_2_io_beat_bits_payload),
.io_beat_bits_head (_out_channels_4_2_io_beat_bits_head),
.io_beat_bits_tail (_out_channels_4_2_io_beat_bits_tail)
); // @[TLSerdes.scala:63:50]
GenericSerializer_TLBeatw10_f32 ser_0 ( // @[TLSerdes.scala:69:23]
.io_in_bits_head (_out_channels_0_2_io_beat_bits_head), // @[TLSerdes.scala:59:50]
.io_out_bits_flit (io_ser_0_out_bits_flit)
); // @[TLSerdes.scala:69:23]
GenericSerializer_TLBeatw88_f32 ser_2 ( // @[TLSerdes.scala:69:23]
.clock (clock),
.reset (reset),
.io_in_ready (/* unused */),
.io_in_valid (1'h0), // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50, :69:23]
.io_in_bits_payload (86'h0), // @[TLSerdes.scala:61:50, :69:23]
.io_in_bits_head (_out_channels_2_2_io_beat_bits_head), // @[TLSerdes.scala:61:50]
.io_in_bits_tail (1'h1), // @[TLSerdes.scala:59:50, :61:50, :69:23]
.io_out_ready (io_ser_2_out_ready),
.io_out_valid (io_ser_2_out_valid),
.io_out_bits_flit (io_ser_2_out_bits_flit)
); // @[TLSerdes.scala:69:23]
GenericSerializer_TLBeatw88_f32 ser_4 ( // @[TLSerdes.scala:69:23]
.clock (clock),
.reset (reset),
.io_in_ready (_ser_4_io_in_ready),
.io_in_valid (_out_channels_4_2_io_beat_valid), // @[TLSerdes.scala:63:50]
.io_in_bits_payload (_out_channels_4_2_io_beat_bits_payload), // @[TLSerdes.scala:63:50]
.io_in_bits_head (_out_channels_4_2_io_beat_bits_head), // @[TLSerdes.scala:63:50]
.io_in_bits_tail (_out_channels_4_2_io_beat_bits_tail), // @[TLSerdes.scala:63:50]
.io_out_ready (io_ser_4_out_ready),
.io_out_valid (io_ser_4_out_valid),
.io_out_bits_flit (io_ser_4_out_bits_flit)
); // @[TLSerdes.scala:69:23]
TLEFromBeat_SerialRAM_a64d64s8k8z8c in_channels_0_2 ( // @[TLSerdes.scala:78:28]
.clock (clock),
.reset (reset),
.io_beat_ready (_in_channels_0_2_io_beat_ready),
.io_beat_valid (_des_0_io_out_valid), // @[TLSerdes.scala:86:23]
.io_beat_bits_head (_des_0_io_out_bits_head), // @[TLSerdes.scala:86:23]
.io_beat_bits_tail (_des_0_io_out_bits_tail) // @[TLSerdes.scala:86:23]
); // @[TLSerdes.scala:78:28]
TLDFromBeat_SerialRAM_a64d64s8k8z8c in_channels_1_2 ( // @[TLSerdes.scala:79:28]
.clock (clock),
.reset (reset),
.io_protocol_ready (auto_manager_in_d_ready),
.io_protocol_valid (_in_channels_1_2_io_protocol_valid),
.io_protocol_bits_opcode (_in_channels_1_2_io_protocol_bits_opcode),
.io_protocol_bits_param (_in_channels_1_2_io_protocol_bits_param),
.io_protocol_bits_size (_in_channels_1_2_io_protocol_bits_size),
.io_protocol_bits_source (_in_channels_1_2_io_protocol_bits_source),
.io_protocol_bits_sink (_in_channels_1_2_io_protocol_bits_sink),
.io_protocol_bits_denied (_in_channels_1_2_io_protocol_bits_denied),
.io_protocol_bits_data (auto_manager_in_d_bits_data),
.io_protocol_bits_corrupt (_in_channels_1_2_io_protocol_bits_corrupt),
.io_beat_ready (_in_channels_1_2_io_beat_ready),
.io_beat_valid (_des_1_io_out_valid), // @[TLSerdes.scala:86:23]
.io_beat_bits_payload (_des_1_io_out_bits_payload), // @[TLSerdes.scala:86:23]
.io_beat_bits_head (_des_1_io_out_bits_head), // @[TLSerdes.scala:86:23]
.io_beat_bits_tail (_des_1_io_out_bits_tail) // @[TLSerdes.scala:86:23]
); // @[TLSerdes.scala:79:28]
TLCFromBeat_SerialRAM_a64d64s8k8z8c in_channels_2_2 ( // @[TLSerdes.scala:80:28]
.clock (clock),
.reset (reset),
.io_beat_ready (_in_channels_2_2_io_beat_ready),
.io_beat_valid (_des_2_io_out_valid), // @[TLSerdes.scala:86:23]
.io_beat_bits_head (_des_2_io_out_bits_head), // @[TLSerdes.scala:86:23]
.io_beat_bits_tail (_des_2_io_out_bits_tail) // @[TLSerdes.scala:86:23]
); // @[TLSerdes.scala:80:28]
TLBFromBeat_SerialRAM_a64d64s8k8z8c in_channels_3_2 ( // @[TLSerdes.scala:81:28]
.clock (clock),
.reset (reset),
.io_beat_ready (_in_channels_3_2_io_beat_ready),
.io_beat_valid (_des_3_io_out_valid), // @[TLSerdes.scala:86:23]
.io_beat_bits_head (_des_3_io_out_bits_head), // @[TLSerdes.scala:86:23]
.io_beat_bits_tail (_des_3_io_out_bits_tail) // @[TLSerdes.scala:86:23]
); // @[TLSerdes.scala:81:28]
TLAFromBeat_SerialRAM_a64d64s8k8z8c in_channels_4_2 ( // @[TLSerdes.scala:82:28]
.clock (clock),
.reset (reset),
.io_beat_ready (_in_channels_4_2_io_beat_ready),
.io_beat_valid (_des_4_io_out_valid), // @[TLSerdes.scala:86:23]
.io_beat_bits_head (_des_4_io_out_bits_head), // @[TLSerdes.scala:86:23]
.io_beat_bits_tail (_des_4_io_out_bits_tail) // @[TLSerdes.scala:86:23]
); // @[TLSerdes.scala:82:28]
GenericDeserializer_TLBeatw10_f32 des_0 ( // @[TLSerdes.scala:86:23]
.io_in_ready (io_ser_0_in_ready),
.io_in_valid (io_ser_0_in_valid),
.io_in_bits_flit (io_ser_0_in_bits_flit),
.io_out_ready (_in_channels_0_2_io_beat_ready), // @[TLSerdes.scala:78:28]
.io_out_valid (_des_0_io_out_valid),
.io_out_bits_head (_des_0_io_out_bits_head),
.io_out_bits_tail (_des_0_io_out_bits_tail)
); // @[TLSerdes.scala:86:23]
GenericDeserializer_TLBeatw67_f32 des_1 ( // @[TLSerdes.scala:86:23]
.clock (clock),
.reset (reset),
.io_in_ready (io_ser_1_in_ready),
.io_in_valid (io_ser_1_in_valid),
.io_in_bits_flit (io_ser_1_in_bits_flit),
.io_out_ready (_in_channels_1_2_io_beat_ready), // @[TLSerdes.scala:79:28]
.io_out_valid (_des_1_io_out_valid),
.io_out_bits_payload (_des_1_io_out_bits_payload),
.io_out_bits_head (_des_1_io_out_bits_head),
.io_out_bits_tail (_des_1_io_out_bits_tail)
); // @[TLSerdes.scala:86:23]
GenericDeserializer_TLBeatw88_f32 des_2 ( // @[TLSerdes.scala:86:23]
.clock (clock),
.reset (reset),
.io_in_ready (io_ser_2_in_ready),
.io_in_valid (io_ser_2_in_valid),
.io_in_bits_flit (io_ser_2_in_bits_flit),
.io_out_ready (_in_channels_2_2_io_beat_ready), // @[TLSerdes.scala:80:28]
.io_out_valid (_des_2_io_out_valid),
.io_out_bits_payload (/* unused */),
.io_out_bits_head (_des_2_io_out_bits_head),
.io_out_bits_tail (_des_2_io_out_bits_tail)
); // @[TLSerdes.scala:86:23]
GenericDeserializer_TLBeatw87_f32 des_3 ( // @[TLSerdes.scala:86:23]
.clock (clock),
.reset (reset),
.io_in_ready (io_ser_3_in_ready),
.io_in_valid (io_ser_3_in_valid),
.io_in_bits_flit (io_ser_3_in_bits_flit),
.io_out_ready (_in_channels_3_2_io_beat_ready), // @[TLSerdes.scala:81:28]
.io_out_valid (_des_3_io_out_valid),
.io_out_bits_head (_des_3_io_out_bits_head),
.io_out_bits_tail (_des_3_io_out_bits_tail)
); // @[TLSerdes.scala:86:23]
GenericDeserializer_TLBeatw88_f32 des_4 ( // @[TLSerdes.scala:86:23]
.clock (clock),
.reset (reset),
.io_in_ready (io_ser_4_in_ready),
.io_in_valid (io_ser_4_in_valid),
.io_in_bits_flit (io_ser_4_in_bits_flit),
.io_out_ready (_in_channels_4_2_io_beat_ready), // @[TLSerdes.scala:82:28]
.io_out_valid (_des_4_io_out_valid),
.io_out_bits_payload (/* unused */),
.io_out_bits_head (_des_4_io_out_bits_head),
.io_out_bits_tail (_des_4_io_out_bits_tail)
); // @[TLSerdes.scala:86:23]
assign auto_manager_in_a_ready = _out_channels_4_2_io_protocol_ready; // @[TLSerdes.scala:39:9, :63:50]
assign auto_manager_in_d_valid = _in_channels_1_2_io_protocol_valid; // @[TLSerdes.scala:39:9, :79:28]
assign auto_manager_in_d_bits_opcode = _in_channels_1_2_io_protocol_bits_opcode; // @[TLSerdes.scala:39:9, :79:28]
assign auto_manager_in_d_bits_param = _in_channels_1_2_io_protocol_bits_param; // @[TLSerdes.scala:39:9, :79:28]
assign auto_manager_in_d_bits_size = _in_channels_1_2_io_protocol_bits_size[3:0]; // @[TLSerdes.scala:39:9, :79:28, :85:9]
assign auto_manager_in_d_bits_source = _in_channels_1_2_io_protocol_bits_source[0]; // @[TLSerdes.scala:39:9, :79:28, :85:9]
assign auto_manager_in_d_bits_sink = _in_channels_1_2_io_protocol_bits_sink[4:0]; // @[TLSerdes.scala:39:9, :79:28, :85:9]
assign auto_manager_in_d_bits_denied = _in_channels_1_2_io_protocol_bits_denied; // @[TLSerdes.scala:39:9, :79:28]
assign auto_manager_in_d_bits_corrupt = _in_channels_1_2_io_protocol_bits_corrupt; // @[TLSerdes.scala:39:9, :79:28]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param inputType Data type of the input activations (in_a)
* @param outputType Data type of in_b/in_d and of the out_b/out_c outputs
* @param accType Data type used for the accumulation registers
* @param df The dataflow(s) this PE supports (Dataflow.OS, Dataflow.WS, or Dataflow.BOTH)
* @param max_simultaneous_matmuls Maximum number of matmuls in flight; sizes the in_id/out_id signals
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
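// Illustrative note (not part of the original file): for Float(8, 24), the usual
// IEEE-754 single-precision shape, bits is 32 bits wide and bias = (1 << 7) - 1 = 127;
// for Float(5, 11) (half precision) bias = 15.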
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
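// Illustrative sketch (not part of the original gemmini file), expanding the
// "create your own" note at the top of this file: a do-nothing Arithmetic instance
// for the DummySInt bundle defined above, in which every operation simply returns a
// don't-care value. It is left commented out so the file's behaviour is unchanged.
//
// implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
//   override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
//     override def *(t: DummySInt) = self.dontCare
//     override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
//     override def +(t: DummySInt) = self.dontCare
//     override def -(t: DummySInt) = self.dontCare
//     override def >>(u: UInt) = self.dontCare
//     override def >(t: DummySInt): Bool = false.B
//     override def identity = self.dontCare
//     override def withWidthOf(t: DummySInt) = self.dontCare
//     override def clippedToWidthOf(t: DummySInt) = self.dontCare
//     override def relu = self.dontCare
//     override def zero = self.dontCare
//     override def minimum = self.dontCare
//   }
// }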
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
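        // Worked example for the rounding shift above (illustrative, not from the upstream source):
        // with self = 11 (0b1011) and u = 2, point_five = bit 1 = 1, zeros = (bit 0 != 0) = 1 and
        // ones_digit = bit 2 = 0, so r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 2.75 rounded to 3.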
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
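        // Usage note for `divider` above (explanatory comment, not upstream): a caller presents the
        // unsigned denominator on `input` and receives self / denominator on `output`, rounded toward
        // zero (round_minMag). The assert requires the consumer to be ready whenever `output.valid` is high.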
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating-point square-root unit, but we should use an integer square-root unit instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
        // We translate our integer to floating-point form so that we can use the hardfloat square-root unit
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
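      // Note on the two constants above (explanatory comment, not upstream): `identity` sets the
      // exponent field to the bias with a zero fraction, i.e. +1.0, while `minimum` sets the sign
      // bit with an all-ones exponent and zero fraction, i.e. negative infinity.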
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_493( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid // @[PE.scala:35:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7]
wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60]
wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [7:0] c1; // @[PE.scala:70:15]
wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [7:0] c2; // @[PE.scala:71:15]
wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}]
wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16]
wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8]
c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15]
if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8]
c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15]
if (io_in_valid_0) // @[PE.scala:31:7]
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
  end // always @(posedge)
MacUnit_237 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_b_0), // @[PE.scala:31:7]
.io_out_d (io_out_b_0)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File rob.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Re-order Buffer
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Bank the ROB, such that each "dispatch" group gets its own row of the ROB,
// and each instruction in the dispatch group goes to a different bank.
// We can compress out the PC by only saving the high-order bits!
//
// ASSUMPTIONS:
// - dispatch groups are aligned to the PC.
//
// NOTES:
// - Currently we do not compress out bubbles in the ROB.
// - Exceptions are only taken when at the head of the commit bundle --
// this helps deal with loads, stores, and refetch instructions.
package boom.v4.exu
import scala.math.ceil
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
import boom.v4.common._
import boom.v4.util._
/**
* IO bundle to interact with the ROB
*
* @param numWakeupPorts number of wakeup ports to the rob
* @param numFpuPorts number of fpu ports that will write back fflags
*/
class RobIo(
val numWakeupPorts: Int
)(implicit p: Parameters) extends BoomBundle
{
// Decode Stage
// (Allocate, write instruction to ROB).
val enq_valids = Input(Vec(coreWidth, Bool()))
val enq_uops = Input(Vec(coreWidth, new MicroOp()))
  val enq_partial_stall = Input(Bool()) // we're dispatching only a partial packet,
                                        // and stalling on the rest of it (don't
                                        // advance the tail ptr)
val xcpt_fetch_pc = Input(UInt(vaddrBitsExtended.W))
val rob_tail_idx = Output(UInt(robAddrSz.W))
val rob_pnr_idx = Output(UInt(robAddrSz.W))
val rob_head_idx = Output(UInt(robAddrSz.W))
// Handle Branch Misspeculations
val brupdate = Input(new BrUpdateInfo())
// Write-back Stage
// (Update of ROB)
// Instruction is no longer busy and can be committed
val wb_resps = Flipped(Vec(numWakeupPorts, Valid(new ExeUnitResp(xLen max fLen+1))))
// Unbusying ports for stores.
val lsu_clr_bsy = Input(Vec(coreWidth, Valid(UInt(robAddrSz.W))))
  // Port for unmarking loads/stores as speculation hazards.
val lsu_clr_unsafe = Input(Vec(lsuWidth, Valid(UInt(robAddrSz.W))))
val lxcpt = Flipped(new ValidIO(new Exception())) // LSU
val csr_replay = Input(Valid(new Exception()))
// Commit stage (free resources).
val commit = Output(new CommitSignals())
val rollback = Bool()
// tell the LSU that the head of the ROB is a load
// (some loads can only execute once they are at the head of the ROB).
val com_load_is_at_rob_head = Output(Bool())
// Communicate exceptions to the CSRFile
val com_xcpt = Valid(new CommitExceptionSignals())
// Let the CSRFile stall us (e.g., wfi).
val csr_stall = Input(Bool())
// Flush signals (including exceptions, pipeline replays, and memory ordering failures)
// to send to the frontend for redirection.
val flush = Valid(new CommitExceptionSignals)
// Stall Decode as appropriate
val empty = Output(Bool())
val ready = Output(Bool()) // ROB is busy unrolling rename state...
// Stall the frontend if we know we will redirect the PC
val flush_frontend = Output(Bool())
val debug_tsc = Input(UInt(xLen.W))
}
/**
* Bundle to send commit signals across processor
*/
class CommitSignals(implicit p: Parameters) extends BoomBundle
{
val valids = Vec(retireWidth, Bool()) // These instructions may not correspond to an architecturally executed insn
val arch_valids = Vec(retireWidth, Bool())
val uops = Vec(retireWidth, new MicroOp())
val fflags = Valid(UInt(5.W))
// These come a cycle later
val debug_insts = Vec(retireWidth, UInt(32.W))
val debug_wdata = Vec(retireWidth, UInt(xLen.W))
}
/**
* Bundle to communicate exceptions to CSRFile
*
* TODO combine FlushSignals and ExceptionSignals (currently timed to different cycles).
*/
class CommitExceptionSignals(implicit p: Parameters) extends BoomBundle
{
val ftq_idx = UInt(log2Ceil(ftqSz).W)
val edge_inst = Bool()
val is_rvc = Bool()
val pc_lob = UInt(log2Ceil(icBlockBytes).W)
val cause = UInt(xLen.W)
val badvaddr = UInt(xLen.W)
// The ROB needs to tell the FTQ if there's a pipeline flush (and what type)
// so the FTQ can drive the frontend with the correct redirected PC.
val flush_typ = FlushTypes()
}
/**
* Tell the frontend the type of flush so it can set up the next PC properly.
*/
object FlushTypes
{
def SZ = 3
def apply() = UInt(SZ.W)
def none = 0.U
def xcpt = 1.U // An exception occurred.
def eret = (2+1).U // Execute an environment return instruction.
def refetch = 2.U // Flush and refetch the head instruction.
def next = 4.U // Flush and fetch the next instruction.
def useCsrEvec(typ: UInt): Bool = typ(0) // typ === xcpt.U || typ === eret.U
def useSamePC(typ: UInt): Bool = typ === refetch
def usePCplus4(typ: UInt): Bool = typ === next
def getType(valid: Bool, i_xcpt: Bool, i_eret: Bool, i_refetch: Bool): UInt = {
val ret =
Mux(!valid, none,
Mux(i_eret, eret,
Mux(i_xcpt, xcpt,
Mux(i_refetch, refetch,
next))))
ret
}
}
/**
* Bundle of signals indicating that an exception occurred
*/
class Exception(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp()
val cause = Bits(log2Ceil(freechips.rocketchip.rocket.Causes.all.max+2).W)
val badvaddr = UInt(coreMaxAddrBits.W)
}
/**
* Bundle for debug ROB signals
* These should not be synthesized!
*/
class DebugRobSignals(implicit p: Parameters) extends BoomBundle
{
val state = UInt()
val rob_head = UInt(robAddrSz.W)
val rob_pnr = UInt(robAddrSz.W)
val xcpt_val = Bool()
val xcpt_uop = new MicroOp()
val xcpt_badvaddr = UInt(xLen.W)
}
/**
* Reorder Buffer to keep track of dependencies and inflight instructions
*
* @param numWakeupPorts number of wakeup ports to the ROB
* @param numFpuPorts number of FPU units that will write back fflags
*/
class Rob(
val numWakeupPorts: Int,
val usingTrace: Boolean
)(implicit p: Parameters) extends BoomModule
{
val io = IO(new RobIo(numWakeupPorts))
// ROB Finite State Machine
val s_reset :: s_normal :: s_wait_till_empty :: s_rollback :: Nil = Enum(4)
val rob_state = RegInit(s_reset)
//commit entries at the head, and unwind exceptions from the tail
val rob_head = RegInit(0.U(log2Ceil(numRobRows).W))
val rob_head_lsb = RegInit(0.U((1 max log2Ceil(coreWidth)).W)) // TODO: Accurately track head LSB (currently always 0)
val rob_head_idx = if (coreWidth == 1) rob_head else Cat(rob_head, rob_head_lsb)
val rob_tail = RegInit(0.U(log2Ceil(numRobRows).W))
val rob_tail_lsb = RegInit(0.U((1 max log2Ceil(coreWidth)).W))
val rob_tail_idx = if (coreWidth == 1) rob_tail else Cat(rob_tail, rob_tail_lsb)
val rob_pnr = RegInit(0.U(log2Ceil(numRobRows).W))
val rob_pnr_lsb = RegInit(0.U((1 max log2Ceil(coreWidth)).W))
val rob_pnr_idx = if (coreWidth == 1) rob_pnr else Cat(rob_pnr , rob_pnr_lsb)
val next_rob_head = WireInit(rob_head)
rob_head := next_rob_head
val full = Wire(Bool())
val empty = Wire(Bool())
val will_commit = Wire(Vec(coreWidth, Bool()))
val can_commit = Wire(Vec(coreWidth, Bool()))
val can_throw_exception = Wire(Vec(coreWidth, Bool()))
val rob_pnr_unsafe = Wire(Vec(coreWidth, Bool())) // are the instructions at the pnr unsafe?
val rob_head_vals = Wire(Vec(coreWidth, Bool())) // are the instructions at the head valid?
val rob_tail_vals = Wire(Vec(coreWidth, Bool())) // are the instructions at the tail valid? (to track partial row dispatches)
val rob_head_uses_stq = Wire(Vec(coreWidth, Bool()))
val rob_head_uses_ldq = Wire(Vec(coreWidth, Bool()))
val rob_head_fflags = Wire(Vec(coreWidth, Valid(UInt(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W))))
val exception_thrown = Wire(Bool())
// exception info
// TODO compress xcpt cause size. Most bits in the middle are zero.
val r_xcpt_val = RegInit(false.B)
val r_xcpt_uop = Reg(new MicroOp())
val r_xcpt_badvaddr = Reg(UInt(coreMaxAddrBits.W))
io.flush_frontend := r_xcpt_val
//--------------------------------------------------
// Utility
def GetRowIdx(rob_idx: UInt): UInt = {
if (coreWidth == 1) return rob_idx
else return rob_idx >> log2Ceil(coreWidth).U
}
def GetBankIdx(rob_idx: UInt): UInt = {
if(coreWidth == 1) { return 0.U }
else { return rob_idx(log2Ceil(coreWidth)-1, 0).asUInt }
}
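  // Example (explanatory comment, not upstream): with coreWidth = 2, rob_idx = 5 lives in
  // row GetRowIdx(5) = 2 of bank GetBankIdx(5) = 1.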
// **************************************************************************
// Debug
class DebugRobBundle extends BoomBundle
{
val valid = Bool()
val busy = Bool()
val unsafe = Bool()
val uop = new MicroOp()
val exception = Bool()
}
val debug_entry = Wire(Vec(numRobEntries, new DebugRobBundle))
debug_entry := DontCare // override in statements below
// **************************************************************************
// --------------------------------------------------------------------------
// **************************************************************************
// Contains all information the PNR needs to find the oldest instruction which can't be safely speculated past.
val rob_unsafe_masked = WireInit(VecInit(Seq.fill(numRobRows << log2Ceil(coreWidth)){false.B}))
val rob_debug_inst_rdata = Wire(Vec(coreWidth, UInt(32.W)))
val rob_debug_inst_wmask = WireInit(VecInit(0.U(coreWidth.W).asBools))
val rob_debug_inst_wdata = Wire(Vec(coreWidth, UInt(32.W)))
// Used for trace port, for debug purposes only
if (usingTrace) {
val rob_debug_inst_mem = SyncReadMem(numRobRows, Vec(coreWidth, UInt(32.W)))
rob_debug_inst_mem.write(rob_tail, rob_debug_inst_wdata, rob_debug_inst_wmask)
rob_debug_inst_rdata := rob_debug_inst_mem.read(rob_head, will_commit.reduce(_||_))
} else {
rob_debug_inst_rdata := DontCare
}
// Branch resolution
val brupdate_b2_rob_row = GetRowIdx(io.brupdate.b2.uop.rob_idx)
val brupdate_b2_rob_row_oh = UIntToOH(brupdate_b2_rob_row)
val brupdate_b2_rob_clr_oh = IsYoungerMask(brupdate_b2_rob_row, rob_head, numRobRows)
val brupdate_b2_rob_bank_idx = GetBankIdx(io.brupdate.b2.uop.rob_idx)
val brupdate_b2_rob_bank_clr_oh = ~MaskLower(UIntToOH(brupdate_b2_rob_bank_idx))
class RobCompactUop extends Bundle {
val is_fencei = Bool()
val ftq_idx = UInt(log2Ceil(ftqSz).W)
val uses_ldq = Bool()
val uses_stq = Bool()
val dst_rtype = UInt(2.W)
val ldst = UInt(lregSz.W)
val pdst = UInt(maxPregSz.W)
val stale_pdst = UInt(maxPregSz.W)
}
val compactUopWidth = 1 + log2Ceil(ftqSz) + 1 + 1 + 2 + lregSz + maxPregSz + maxPregSz
def compact_to_uop(compact: RobCompactUop, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.is_fencei := compact.is_fencei
out.ftq_idx := compact.ftq_idx
out.uses_ldq := compact.uses_ldq
out.uses_stq := compact.uses_stq
out.dst_rtype := compact.dst_rtype
out.ldst := compact.ldst
out.pdst := compact.pdst
out.stale_pdst := compact.stale_pdst
out
}
def uop_to_compact(uop: MicroOp): RobCompactUop = {
val out = Wire(new RobCompactUop)
out.is_fencei := uop.is_fencei
out.ftq_idx := uop.ftq_idx
out.uses_ldq := uop.uses_ldq
out.uses_stq := uop.uses_stq
out.dst_rtype := uop.dst_rtype
out.ldst := uop.ldst
out.pdst := uop.pdst
out.stale_pdst := uop.stale_pdst
out
}
// More efficient rob uop storage in 1R1W masked SRAM
val rob_compact_uop_mem = SyncReadMem(numRobRows, Vec(coreWidth, UInt(compactUopWidth.W)))
val rob_compact_uop_wdata = VecInit(io.enq_uops.map(u => uop_to_compact(u).asUInt))
rob_compact_uop_mem.write(rob_tail, rob_compact_uop_wdata, io.enq_valids)
val rob_compact_uop_rdata = rob_compact_uop_mem.read(next_rob_head)
val rob_compact_uop_might_bypass = rob_head === RegNext(rob_tail)
val rob_compact_uop_bypassed = (0 until coreWidth) map { w =>
Mux(rob_head === RegNext(rob_tail) && RegNext(io.enq_valids(w)),
RegNext(rob_compact_uop_wdata(w)),
Mux(rob_head === ShiftRegister(rob_tail, 2) && ShiftRegister(io.enq_valids(w), 2),
ShiftRegister(rob_compact_uop_wdata(w), 2),
rob_compact_uop_rdata(w)
)
).asTypeOf(new RobCompactUop)
}
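  // The bypass above exists because rob_compact_uop_mem is a SyncReadMem: rows enqueued in the
  // previous one or two cycles may not yet be visible on the read port when the head catches up
  // to the tail, so freshly written data is forwarded from registered copies of the write data.
  // (Explanatory comment, not part of the upstream source.)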
val rob_fflags = Seq.fill(coreWidth)(Reg(Vec(numRobRows, UInt(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W))))
for (w <- 0 until coreWidth) {
def MatchBank(bank_idx: UInt): Bool = (bank_idx === w.U)
// one bank
val rob_val = RegInit(VecInit(Seq.fill(numRobRows){false.B}))
val rob_bsy = Reg(Vec(numRobRows, Bool()))
val rob_unsafe = Reg(Vec(numRobRows, Bool()))
val rob_uop = Reg(Vec(numRobRows, new MicroOp()))
val rob_exception = Reg(Vec(numRobRows, Bool()))
val rob_predicated = Reg(Vec(numRobRows, Bool())) // Was this instruction predicated out?
val rob_fflags = Reg(Vec(numRobRows, Valid(Bits(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W))))
val rob_debug_wdata = Mem(numRobRows, UInt(xLen.W))
//-----------------------------------------------
// Dispatch: Add Entry to ROB
rob_debug_inst_wmask(w) := io.enq_valids(w)
rob_debug_inst_wdata(w) := io.enq_uops(w).debug_inst
when (io.enq_valids(w)) {
rob_val(rob_tail) := true.B
rob_bsy(rob_tail) := io.enq_uops(w).starts_bsy
rob_unsafe(rob_tail) := io.enq_uops(w).starts_unsafe
rob_uop(rob_tail) := io.enq_uops(w)
rob_exception(rob_tail) := io.enq_uops(w).exception
rob_predicated(rob_tail) := false.B
rob_fflags(rob_tail).valid := false.B
rob_fflags(rob_tail).bits := 0.U
assert (rob_val(rob_tail) === false.B, "[rob] overwriting a valid entry.")
assert ((io.enq_uops(w).rob_idx >> log2Ceil(coreWidth)) === rob_tail)
} .elsewhen (io.enq_valids.reduce(_|_) && !rob_val(rob_tail)) {
}
//-----------------------------------------------
// Writeback
for (i <- 0 until numWakeupPorts) {
val wb_resp = io.wb_resps(i)
val wb_uop = wb_resp.bits.uop
val row_idx = GetRowIdx(wb_uop.rob_idx)
when (wb_resp.valid && MatchBank(GetBankIdx(wb_uop.rob_idx))) {
rob_bsy(row_idx) := false.B
rob_unsafe(row_idx) := false.B
rob_predicated(row_idx) := wb_resp.bits.predicated
when (wb_resp.bits.fflags.valid) {
assert(!rob_fflags(row_idx).valid)
rob_fflags(row_idx).valid := true.B
rob_fflags(row_idx).bits := wb_resp.bits.fflags.bits
}
}
}
// Stores have a separate method to clear busy bits
for (clr_rob_idx <- io.lsu_clr_bsy) {
when (clr_rob_idx.valid && MatchBank(GetBankIdx(clr_rob_idx.bits))) {
val cidx = GetRowIdx(clr_rob_idx.bits)
rob_bsy(cidx) := false.B
rob_unsafe(cidx) := false.B
assert (rob_val(cidx) === true.B, "[rob] store writing back to invalid entry.")
assert (rob_bsy(cidx) === true.B, "[rob] store writing back to a not-busy entry.")
}
}
for (clr <- io.lsu_clr_unsafe) {
when (clr.valid && MatchBank(GetBankIdx(clr.bits))) {
val cidx = GetRowIdx(clr.bits)
rob_unsafe(cidx) := false.B
}
}
//-----------------------------------------------------
// Exceptions
// (the cause bits are compressed and stored elsewhere)
when (io.lxcpt.valid && MatchBank(GetBankIdx(io.lxcpt.bits.uop.rob_idx))) {
rob_exception(GetRowIdx(io.lxcpt.bits.uop.rob_idx)) := true.B
when (io.lxcpt.bits.cause =/= MINI_EXCEPTION_MEM_ORDERING) {
// In the case of a mem-ordering failure, the failing load will have been marked safe already.
assert(rob_unsafe(GetRowIdx(io.lxcpt.bits.uop.rob_idx)),
"An instruction marked as safe is causing an exception")
}
}
when (io.csr_replay.valid && MatchBank(GetBankIdx(io.csr_replay.bits.uop.rob_idx))) {
rob_exception(GetRowIdx(io.csr_replay.bits.uop.rob_idx)) := true.B
}
can_throw_exception(w) := rob_val(rob_head) && rob_exception(rob_head)
//-----------------------------------------------
// Commit
// Can this instruction commit? (the check for exceptions/rob_state happens later).
// Block commit if there is mispredict
can_commit(w) := rob_val(rob_head) && !(rob_bsy(rob_head)) && !io.csr_stall && !io.brupdate.b2.mispredict
// use the same "com_uop" for both rollback AND commit
// Perform Commit
io.commit.valids(w) := will_commit(w)
io.commit.arch_valids(w) := will_commit(w) && !rob_predicated(rob_head)
io.commit.uops(w) := compact_to_uop(rob_compact_uop_bypassed(w), rob_uop(rob_head))
io.commit.debug_insts(w) := rob_debug_inst_rdata(w)
    // We unbusy branches in b1, but it's easier to mark the taken/provider src in b2,
// when the branch might be committing
when (io.brupdate.b2.mispredict &&
MatchBank(GetBankIdx(io.brupdate.b2.uop.rob_idx)) &&
GetRowIdx(io.brupdate.b2.uop.rob_idx) === rob_head) {
io.commit.uops(w).debug_fsrc := BSRC_C
io.commit.uops(w).taken := io.brupdate.b2.taken
}
when (rob_state === s_rollback) {
for (i <- 0 until numRobRows) {
rob_val(i) := false.B
rob_bsy(i) := false.B
}
}
// -----------------------------------------------
// Kill speculated entries on branch mispredict
for (i <- 0 until numRobRows) {
val br_mask = rob_uop(i).br_mask
when (io.brupdate.b2.mispredict && (
brupdate_b2_rob_clr_oh(i) ||
(brupdate_b2_rob_row_oh(i) && brupdate_b2_rob_bank_clr_oh(w))
)) {
rob_val(i) := false.B
}
// //kill instruction if mispredict & br mask match
// when (IsKilledByBranch(io.brupdate, false.B, br_mask))
// {
// rob_val(i) := false.B
// } .elsewhen (rob_val(i)) {
// // clear speculation bit even on correct speculation
// rob_uop(i).br_mask := GetNewBrMask(io.brupdate, br_mask)
// }
}
// Debug signal to figure out which prediction structure
// or core resolved a branch correctly
when (io.brupdate.b2.mispredict &&
MatchBank(GetBankIdx(io.brupdate.b2.uop.rob_idx))) {
rob_uop(GetRowIdx(io.brupdate.b2.uop.rob_idx)).debug_fsrc := BSRC_C
rob_uop(GetRowIdx(io.brupdate.b2.uop.rob_idx)).taken := io.brupdate.b2.taken
}
// -----------------------------------------------
// Commit
when (will_commit(w)) {
rob_val(rob_head) := false.B
}
// -----------------------------------------------
// Outputs
rob_head_vals(w) := rob_val(rob_head)
rob_tail_vals(w) := rob_val(rob_tail)
rob_head_fflags(w) := rob_fflags(rob_head)
rob_head_uses_stq(w) := io.commit.uops(w).uses_stq
rob_head_uses_ldq(w) := io.commit.uops(w).uses_ldq
//------------------------------------------------
// Invalid entries are safe; thrown exceptions are unsafe.
for (i <- 0 until numRobRows) {
rob_unsafe_masked((i << log2Ceil(coreWidth)) + w) := rob_val(i) && (rob_unsafe(i) || rob_exception(i))
}
// Read unsafe status of PNR row.
rob_pnr_unsafe(w) := rob_val(rob_pnr) && (rob_unsafe(rob_pnr) || rob_exception(rob_pnr))
//--------------------------------------------------
// Debug: for debug purposes, track side-effects to all register destinations
for (i <- 0 until numWakeupPorts) {
val rob_idx = io.wb_resps(i).bits.uop.rob_idx
when (io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx))) {
rob_debug_wdata(GetRowIdx(rob_idx)) := io.wb_resps(i).bits.data
}
val temp_uop = rob_uop(GetRowIdx(rob_idx))
assert (!(io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx)) &&
!rob_val(GetRowIdx(rob_idx))),
"[rob] writeback (" + i + ") occurred to an invalid ROB entry.")
assert (!(io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx)) &&
!rob_bsy(GetRowIdx(rob_idx))),
"[rob] writeback (" + i + ") occurred to a not-busy ROB entry.")
assert (!(io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx)) &&
temp_uop.dst_rtype =/= RT_X && temp_uop.pdst =/= io.wb_resps(i).bits.uop.pdst),
"[rob] writeback (" + i + ") occurred to the wrong pdst.")
}
io.commit.debug_wdata(w) := rob_debug_wdata(rob_head)
} //for (w <- 0 until coreWidth)
// **************************************************************************
// --------------------------------------------------------------------------
// **************************************************************************
// -----------------------------------------------
// Commit Logic
  // Take the "can_commit" vector and let only the leading run of can_commit instructions commit;
  // older instructions may block the commit of younger instructions in the commit bundle
  // (e.g., an exception, or an entry that is valid && busy).
  // Finally, don't throw an exception if there are instructions in front of
  // it that want to commit (only throw an exception at the head of the bundle).
var block_commit = (rob_state =/= s_normal) && (rob_state =/= s_wait_till_empty) || RegNext(exception_thrown) || RegNext(RegNext(exception_thrown))
var will_throw_exception = false.B
var block_xcpt = false.B
for (w <- 0 until coreWidth) {
will_throw_exception = (can_throw_exception(w) && !block_commit && !block_xcpt) || will_throw_exception
will_commit(w) := can_commit(w) && !can_throw_exception(w) && !block_commit
block_commit = (rob_head_vals(w) &&
(!can_commit(w) || can_throw_exception(w))) || block_commit
block_xcpt = will_commit(w)
}
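  // Example of the chain above (explanatory comment, not upstream): in a 2-wide machine, if the
  // head slot is valid but still busy, block_commit becomes true after w = 0, so the second slot
  // cannot commit that cycle even if its own can_commit is set.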
// Note: exception must be in the commit bundle.
// Note: exception must be the first valid instruction in the commit bundle.
exception_thrown := will_throw_exception
val is_mini_exception = io.com_xcpt.bits.cause.isOneOf(MINI_EXCEPTION_MEM_ORDERING, MINI_EXCEPTION_CSR_REPLAY)
io.com_xcpt.valid := exception_thrown && !is_mini_exception
io.com_xcpt.bits := DontCare
io.com_xcpt.bits.cause := r_xcpt_uop.exc_cause
io.com_xcpt.bits.badvaddr := Sext(r_xcpt_badvaddr, xLen)
val insn_sys_pc2epc =
rob_head_vals.reduce(_|_) && PriorityMux(rob_head_vals, io.commit.uops.map{u => u.is_sys_pc2epc})
val refetch_inst = exception_thrown || insn_sys_pc2epc
val com_xcpt_uop = PriorityMux(rob_head_vals, io.commit.uops)
io.com_xcpt.bits.ftq_idx := com_xcpt_uop.ftq_idx
io.com_xcpt.bits.edge_inst := com_xcpt_uop.edge_inst
io.com_xcpt.bits.is_rvc := com_xcpt_uop.is_rvc
io.com_xcpt.bits.pc_lob := com_xcpt_uop.pc_lob
val flush_commit_mask = Range(0,coreWidth).map{i => io.commit.valids(i) && io.commit.uops(i).flush_on_commit}
val flush_commit = flush_commit_mask.reduce(_|_)
val flush_val = exception_thrown || flush_commit
assert(!(PopCount(flush_commit_mask) > 1.U),
"[rob] Can't commit multiple flush_on_commit instructions on one cycle")
val flush_uop = Mux(exception_thrown, com_xcpt_uop, Mux1H(flush_commit_mask, io.commit.uops))
// delay a cycle for critical path considerations
io.flush.valid := flush_val
io.flush.bits.badvaddr := DontCare
io.flush.bits.cause := DontCare
io.flush.bits.ftq_idx := flush_uop.ftq_idx
io.flush.bits.pc_lob := flush_uop.pc_lob
io.flush.bits.edge_inst := flush_uop.edge_inst
io.flush.bits.is_rvc := flush_uop.is_rvc
io.flush.bits.flush_typ := FlushTypes.getType(flush_val,
exception_thrown && !is_mini_exception,
flush_commit && flush_uop.is_eret,
refetch_inst)
io.rollback := rob_state === s_rollback
// -----------------------------------------------
// FP Exceptions
// send fflags bits to the CSRFile to accrue
val fflags_val = Wire(Vec(coreWidth, Bool()))
val fflags = Wire(Vec(coreWidth, UInt(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W)))
for (w <- 0 until coreWidth) {
fflags_val(w) := rob_head_fflags(w).valid && io.commit.valids(w)
fflags(w) := Mux(fflags_val(w), rob_head_fflags(w).bits, 0.U)
assert (!(io.commit.valids(w) &&
io.commit.uops(w).fp_val &&
!(io.commit.uops(w).uses_stq || io.commit.uops(w).uses_ldq) &&
!rob_head_fflags(w).valid),
"Committed FP instruction did not set fflag bits")
assert (!(io.commit.valids(w) &&
!io.commit.uops(w).fp_val &&
rob_head_fflags(w).valid),
"Committed non-FP instruction has non-zero fflag bits.")
assert (!(io.commit.valids(w) &&
io.commit.uops(w).fp_val &&
(io.commit.uops(w).uses_ldq || io.commit.uops(w).uses_stq) &&
(rob_head_fflags(w).bits =/= 0.U && rob_head_fflags(w).valid)),
"Committed FP load or store has non-zero fflag bits.")
}
io.commit.fflags.valid := fflags_val.reduce(_|_)
io.commit.fflags.bits := fflags.reduce(_|_)
// -----------------------------------------------
// Exception Tracking Logic
// only store the oldest exception, since only one can happen!
val next_xcpt_uop = Wire(new MicroOp())
next_xcpt_uop := r_xcpt_uop
val enq_xcpts = Wire(Vec(coreWidth, Bool()))
for (i <- 0 until coreWidth) {
enq_xcpts(i) := io.enq_valids(i) && io.enq_uops(i).exception
}
when (!(io.flush.valid || exception_thrown)) {
val new_xcpt_valid = io.lxcpt.valid || io.csr_replay.valid
val lxcpt_older = !io.csr_replay.valid || (IsOlder(io.lxcpt.bits.uop.rob_idx, io.csr_replay.bits.uop.rob_idx, rob_head_idx) && io.lxcpt.valid)
val new_xcpt = Mux(lxcpt_older, io.lxcpt.bits, io.csr_replay.bits)
when (new_xcpt_valid) {
when (!r_xcpt_val || IsOlder(new_xcpt.uop.rob_idx, r_xcpt_uop.rob_idx, rob_head_idx)) {
r_xcpt_val := true.B
next_xcpt_uop := new_xcpt.uop
next_xcpt_uop.exc_cause := new_xcpt.cause
r_xcpt_badvaddr := new_xcpt.badvaddr
}
} .elsewhen (!r_xcpt_val && enq_xcpts.reduce(_|_)) {
val idx = enq_xcpts.indexWhere{i: Bool => i}
// if no exception yet, dispatch exception wins
r_xcpt_val := true.B
next_xcpt_uop := io.enq_uops(idx)
r_xcpt_badvaddr := AlignPCToBoundary(io.xcpt_fetch_pc, icBlockBytes) | io.enq_uops(idx).pc_lob
}
}
r_xcpt_uop := next_xcpt_uop
r_xcpt_uop.br_mask := GetNewBrMask(io.brupdate, next_xcpt_uop)
when (IsKilledByBranch(io.brupdate, io.flush.valid, next_xcpt_uop)) {
r_xcpt_val := false.B
}
assert (!(exception_thrown && !r_xcpt_val),
"ROB trying to throw an exception, but it doesn't have a valid xcpt_cause")
assert (!(empty && r_xcpt_val),
"ROB is empty, but believes it has an outstanding exception.")
assert (!(will_throw_exception && (GetRowIdx(r_xcpt_uop.rob_idx) =/= rob_head)),
"ROB is throwing an exception, but the stored exception information's " +
"rob_idx does not match the rob_head")
// -----------------------------------------------
// ROB Head Logic
// remember if we're still waiting on the rest of the dispatch packet, and prevent
  // the rob_head from advancing if it commits a partial packet before we
// dispatch the rest of it.
// update when committed ALL valid instructions in commit_bundle
val r_partial_row = RegInit(false.B)
val finished_committing_row =
(io.commit.valids.asUInt =/= 0.U) &&
((will_commit.asUInt ^ rob_head_vals.asUInt) === 0.U) &&
!(r_partial_row && rob_head === rob_tail && !io.brupdate.b2.mispredict)
when (finished_committing_row) {
next_rob_head := WrapInc(rob_head, numRobRows)
rob_head_lsb := 0.U
} .elsewhen (rob_state === s_rollback) {
rob_head_lsb := 0.U
} .otherwise {
rob_head_lsb := OHToUInt(PriorityEncoderOH(rob_head_vals.asUInt))
}
// -----------------------------------------------
// ROB Point-of-No-Return (PNR) Logic
// Acts as a second head, but only waits on busy instructions which might cause misspeculation.
// TODO is it worth it to add an extra 'parity' bit to all rob pointer logic?
// Makes 'older than' comparisons ~3x cheaper, in case we're going to use the PNR to do a large number of those.
// Also doesn't require the rob tail (or head) to be exported to whatever we want to compare with the PNR.
if (enableFastPNR) {
val unsafe_entry_in_rob = rob_unsafe_masked.reduce(_||_)
val next_rob_pnr_idx = Mux(unsafe_entry_in_rob,
AgePriorityEncoder(rob_unsafe_masked, rob_head_idx),
rob_tail << log2Ceil(coreWidth) | PriorityEncoder(~rob_tail_vals.asUInt))
rob_pnr := next_rob_pnr_idx >> log2Ceil(coreWidth)
if (coreWidth > 1)
rob_pnr_lsb := next_rob_pnr_idx(log2Ceil(coreWidth)-1, 0)
} else {
val safe_to_inc = rob_state === s_normal || rob_state === s_wait_till_empty
val do_inc_row = !rob_pnr_unsafe.reduce(_||_) && !(rob_pnr === rob_tail && !io.brupdate.b2.mispredict)
when (rob_state === s_rollback) {
assert(rob_pnr === rob_head)
rob_pnr_lsb := 0.U
} .elsewhen (empty && io.enq_valids.asUInt =/= 0.U) {
      // Unfortunately for us, the ROB does not use its entries in monotonically
// increasing order, even in the case of no exceptions. The edge case
// arises when partial rows are enqueued and committed, leaving an empty
// ROB.
rob_pnr := rob_head
rob_pnr_lsb := PriorityEncoder(io.enq_valids)
} .elsewhen (safe_to_inc && do_inc_row) {
rob_pnr := WrapInc(rob_pnr, numRobRows)
rob_pnr_lsb := 0.U
} .elsewhen (safe_to_inc && (rob_pnr =/= rob_tail)) {
rob_pnr_lsb := PriorityEncoder(rob_pnr_unsafe)
} .elsewhen (safe_to_inc && !full && !empty) {
rob_pnr_lsb := PriorityEncoder(rob_pnr_unsafe.asUInt | ~MaskLower(rob_tail_vals.asUInt))
}
}
// Head overrunning PNR likely means an entry hasn't been marked as safe when it should have been.
assert(!IsOlder(rob_pnr_idx, rob_head_idx, rob_tail_idx) || rob_pnr_idx === rob_tail_idx)
// PNR overrunning tail likely means an entry has been marked as safe when it shouldn't have been.
assert(!IsOlder(rob_tail_idx, rob_pnr_idx, rob_head_idx) || full)
// -----------------------------------------------
// ROB Tail Logic
when (io.brupdate.b2.mispredict) {
rob_tail := WrapInc(GetRowIdx(io.brupdate.b2.uop.rob_idx), numRobRows)
rob_tail_lsb := 0.U
r_partial_row := false.B
} .elsewhen (io.enq_valids.asUInt =/= 0.U && !io.enq_partial_stall) {
rob_tail := WrapInc(rob_tail, numRobRows)
rob_tail_lsb := 0.U
r_partial_row := false.B
} .elsewhen (io.enq_valids.asUInt =/= 0.U && io.enq_partial_stall) {
rob_tail_lsb := PriorityEncoder(~MaskLower(io.enq_valids.asUInt))
r_partial_row := true.B
}
// -----------------------------------------------
// Full/Empty Logic
full := WrapInc(rob_tail, numRobRows) === rob_head
empty := (rob_head === rob_tail) && (rob_head_vals.asUInt === 0.U)
io.rob_head_idx := rob_head_idx
io.rob_tail_idx := rob_tail_idx
io.rob_pnr_idx := rob_pnr_idx
io.empty := empty
io.ready := (rob_state === s_normal) && !full && !r_xcpt_val
//-----------------------------------------------
//-----------------------------------------------
//-----------------------------------------------
// ROB FSM
switch (rob_state) {
is (s_reset) {
rob_state := s_normal
}
is (s_normal) {
when (RegNext(RegNext(exception_thrown))) {
rob_state := s_rollback
} .otherwise {
for (w <- 0 until coreWidth) {
when (io.enq_valids(w) && io.enq_uops(w).is_unique) {
rob_state := s_wait_till_empty
}
}
}
}
is (s_rollback) {
rob_tail := rob_head
rob_tail_lsb := 0.U
rob_state := s_normal
}
is (s_wait_till_empty) {
when (RegNext(RegNext(exception_thrown))) {
rob_state := s_rollback
} .elsewhen (empty) {
rob_state := s_normal
}
}
}
// -----------------------------------------------
// Outputs
io.com_load_is_at_rob_head := RegNext(rob_head_uses_ldq(PriorityEncoder(rob_head_vals.asUInt)) &&
!will_commit.reduce(_||_))
override def toString: String = BoomCoreStringPrefix(
"==ROB==",
"Machine Width : " + coreWidth,
"Rob Entries : " + numRobEntries,
"Rob Rows : " + numRobRows,
"Rob Row size : " + log2Ceil(numRobRows),
"log2Ceil(coreWidth): " + log2Ceil(coreWidth))
}
| module rob_compact_uop_mem( // @[rob.scala:337:40]
input [4:0] R0_addr,
input R0_clk,
output [59:0] R0_data,
input [4:0] W0_addr,
input W0_clk,
input [59:0] W0_data,
input [1:0] W0_mask
);
rob_compact_uop_mem_ext rob_compact_uop_mem_ext ( // @[rob.scala:337:40]
.R0_addr (R0_addr),
.R0_en (1'h1), // @[rob.scala:337:40]
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (1'h1), // @[rob.scala:337:40]
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
); // @[rob.scala:337:40]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
  // Just because a client says BtoT, by the time we process the request it may be N.
  // Therefore, we must consult our own meta-data state to confirm the client still owns the line.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuffer:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
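  // Sketch (assumption, not in the original file): the "flowAC" and "flowD" configurations
  // referenced in the comments above are not defined here; by analogy with flowAD/flowAE
  // they would presumably place flow buffers on the inner A/C sinks and on D respectively.
  val flowAC = InclusiveCachePortParameters(
    a = BufferParams.flow,
    b = BufferParams.none,
    c = BufferParams.flow,
    d = BufferParams.none,
    e = BufferParams.none)
  val flowD = InclusiveCachePortParameters(
    a = BufferParams.none,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.flow,
    e = BufferParams.none)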
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
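  // For example, bitOffsets(0xb) = bitOffsets(0b1011) = List(0, 1, 3): the positions of the
  // set bits, LSB first. Applied to pickMask, addressMapping therefore lists the physical
  // address bits that actually distinguish the cacheable address ranges.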
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
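  // Illustrative example (client layout assumed): with two probe-supporting inner clients
  // owning sourceId ranges [0, 16) and [16, 32), clientBit(20.U) = "b10".U -- a one-hot
  // vector whose LSB corresponds to the first probe-supporting client.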
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
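  // For example, with offsetBits = 6, setBits = 11 and tagBits = 9 (the widths used by the
  // MSHR instance below), parseAddress splits the 26-bit compacted address x into
  // offset = x(5, 0), set = x(16, 6) and tag = x(25, 17).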
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
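  // Illustrative examples: Get (opcode(2) = 1, neither Hint nor Acquire) does not need trunk;
  // PutFullData/PutPartialData (opcode(2) = 0) do; AcquireBlock with NtoB does not, while
  // AcquireBlock/AcquirePerm with NtoT or BtoT do.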
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
    // Hint => could be anything, so a probe IS needed; but if hintsSkipProbe is enabled, skip probing the requesting client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
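  // Worked example: with the default memCycles = 40 and, say, 64-byte blocks over 8-byte
  // beats (blockBeats = 8), out_mshrs = max(2, (40 + 8 - 1) / 8) = 5 (or max(3, 5) = 5 with
  // dirReg) and all_mshrs = 2 + 5 = 7.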
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module MSHR_34( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [10:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [3:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57]
wire excluded_client = 1'h0; // @[MSHR.scala:279:28]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34]
wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53]
wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [8:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [10:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64]
reg [8:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [3:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [10:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
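// Editor's sketch (not part of the original ShiftReg.scala): a minimal use of ShiftRegInit.
// The wrapper module and signal names below are hypothetical, added only for illustration.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  // Three RegNext stages, each reset to false.B, with suggested names "din_pipe_0..2"
  io.out := ShiftRegInit(io.in, n = 3, init = false.B, name = Some("din_pipe"))
}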
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but is only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
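// Editor's note (illustrative, not part of the original file): the AsyncResetShiftReg
// companion object above is the usual entry point; a typical call (hypothetical signal
// name) pipelines a value through `depth` asynchronously-reset stages:
//   val piped = AsyncResetShiftReg(someUInt, depth = 2, init = 0, name = Some("out_pipe"))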
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
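// Editor's sketch (not part of the original SynchronizerReg.scala): a common CDC use of the
// companion object above, synchronizing a single-bit level signal from another clock domain.
// The wrapper module and signal names are hypothetical.
class AsyncLevelSyncExample extends Module {
  val io = IO(new Bundle {
    val levelAsync = Input(Bool())   // level signal generated in another clock domain
    val levelSync  = Output(Bool())  // synchronized copy, safe to sample in this domain
  })
  // 3-deep synchronizer chain, asynchronously reset to 0
  io.levelSync := AsyncResetSynchronizerShiftReg(io.levelAsync, 3, 0, Some("level_sync"))
}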
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_92( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_148 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
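// Editor's note (illustrative, not part of the original PE.scala): a PE is normally
// instantiated by the Mesh with concrete element types and a dataflow selection, e.g.
// (parameter values hypothetical; Dataflow comes from gemmini's Dataflow.scala, not shown here):
//   val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH, max_simultaneous_matmuls = 5))
// Routing both the output-stationary and weight-stationary paths through the single
// mac_unit instance inside PE is what lets synthesis reuse one multiplier.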
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
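// Editor's sketch (not part of the original Arithmetic.scala): the typeclass is consumed by
// importing the implicit `cast` evidence and calling the ArithmeticOps methods; the helper
// object and method names below are hypothetical.
object ArithmeticUsageExample {
  // Returns a * b + acc for any element type T with an Arithmetic instance in scope
  // (e.g. the UInt and SInt instances defined below).
  def macExample[T <: Data](a: T, b: T, acc: T)(implicit ev: Arithmetic[T]): T = {
    import ev._
    acc.mac(a, b)
  }
}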
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
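// Editor's note (worked example, not in the original source): for self = 11 (0b1011) and
// u = 2, point_five = bit 1 = 1, zeros = (bit 0 =/= 0) = true, ones_digit = bit 2 = 0, so
// r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounds up to 3.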
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
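// A minimal software model (ours, not used by the hardware) of the rounding increment that
// the UInt and SInt ">>" implementations compute from the vxrm equation linked above:
// round to nearest, with ties broken toward an even result.
object RoundingShiftModel {
  def roundingShift(x: Int, u: Int): Int =
    if (u == 0) x
    else {
      val pointFive    = (x >> (u - 1)) & 1               // bit just below the cut point
      val lowerNonZero = (x & ((1 << (u - 1)) - 1)) != 0  // the "zeros" term above
      val onesDigit    = (x >> u) & 1                     // LSB of the truncated result
      (x >> u) + (if (pointFive == 1 && (lowerNonZero || onesDigit == 1)) 1 else 0)
    }
  // roundingShift(6, 2) == 2 (1.5 ties to even); roundingShift(5, 2) == 1 (1.25 rounds down)
}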
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating-point square-root unit, but we should use an integer square root instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
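// A software-only sketch (ours) of the constant that the Float ">>" above multiplies by:
// an IEEE encoding with sign 0, exponent field (bias - u), and an all-zero significand is
// exactly 2^(-u), assuming bias - u is nonzero (the assert above rules out denormals).
object FloatShiftConstModel {
  def twoToMinusU(expWidth: Int, sigWidth: Int, u: Int): BigInt = {
    val bias = (BigInt(1) << (expWidth - 1)) - 1
    (bias - u) << (sigWidth - 1) // sign = 0, significand bits = 0
  }
  // twoToMinusU(8, 24, 3) == 0x3E000000, the binary32 encoding of 0.125
}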
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_37( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
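// A software-only sketch (ours, not part of hardfloat) of the tag checks above: in the
// recoded format the (expWidth+1)-bit exponent encodes zero when its top three bits are
// 000, and a special value when its top two bits are 11 (NaN if the next bit is 1,
// infinity if it is 0); everything else is a finite nonzero number.
object RecodedTagModel {
  def isZero(exp: BigInt, expWidth: Int): Boolean = (exp >> (expWidth - 2)) == 0
  def isSpecial(exp: BigInt, expWidth: Int): Boolean = (exp >> (expWidth - 1)) == 3
  def isNaN(exp: BigInt, expWidth: Int): Boolean = isSpecial(exp, expWidth) && exp.testBit(expWidth - 2)
  def isInf(exp: BigInt, expWidth: Int): Boolean = isSpecial(exp, expWidth) && !exp.testBit(expWidth - 2)
}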
| module RecFNToRecFN_149( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
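// A software-only model (ours, not part of rocket-chip) of leftOR/rightOR above, for widths
// below 32: each doubling step ORs the value with a shifted copy of itself, so set bits
// propagate toward the MSB (leftOR) or the LSB (rightOR) in log2(width) steps.
def leftORModel(x: Int, width: Int): Int = {
  var v = x & ((1 << width) - 1)
  var s = 1
  while (s < width) { v = (v | (v << s)) & ((1 << width) - 1); s += s }
  v
}
// leftORModel(0x04, 8) == 0xFC, whereas the corresponding rightOR of 0x04 would be 0x07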
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File PMP.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat, log2Ceil}
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
class PMPConfig extends Bundle {
val l = Bool()
val res = UInt(2.W)
val a = UInt(2.W)
val x = Bool()
val w = Bool()
val r = Bool()
}
object PMP {
def lgAlign = 2
def apply(reg: PMPReg): PMP = {
val pmp = Wire(new PMP()(reg.p))
pmp.cfg := reg.cfg
pmp.addr := reg.addr
pmp.mask := pmp.computeMask
pmp
}
}
class PMPReg(implicit p: Parameters) extends CoreBundle()(p) {
val cfg = new PMPConfig
val addr = UInt((paddrBits - PMP.lgAlign).W)
def reset(): Unit = {
cfg.a := 0.U
cfg.l := 0.U
}
def readAddr = if (pmpGranularity.log2 == PMP.lgAlign) addr else {
val mask = ((BigInt(1) << (pmpGranularity.log2 - PMP.lgAlign)) - 1).U
Mux(napot, addr | (mask >> 1), ~(~addr | mask))
}
def napot = cfg.a(1)
def torNotNAPOT = cfg.a(0)
def tor = !napot && torNotNAPOT
def cfgLocked = cfg.l
def addrLocked(next: PMPReg) = cfgLocked || next.cfgLocked && next.tor
}
class PMP(implicit p: Parameters) extends PMPReg {
val mask = UInt(paddrBits.W)
import PMP._
def computeMask = {
val base = Cat(addr, cfg.a(0)) | ((pmpGranularity - 1).U >> lgAlign)
Cat(base & ~(base + 1.U), ((1 << lgAlign) - 1).U)
}
private def comparand = ~(~(addr << lgAlign) | (pmpGranularity - 1).U)
private def pow2Match(x: UInt, lgSize: UInt, lgMaxSize: Int) = {
def eval(a: UInt, b: UInt, m: UInt) = ((a ^ b) & ~m) === 0.U
if (lgMaxSize <= pmpGranularity.log2) {
eval(x, comparand, mask)
} else {
// break up the circuit; the MSB part will be CSE'd
val lsbMask = mask | UIntToOH1(lgSize, lgMaxSize)
val msbMatch = eval(x >> lgMaxSize, comparand >> lgMaxSize, mask >> lgMaxSize)
val lsbMatch = eval(x(lgMaxSize-1, 0), comparand(lgMaxSize-1, 0), lsbMask(lgMaxSize-1, 0))
msbMatch && lsbMatch
}
}
private def boundMatch(x: UInt, lsbMask: UInt, lgMaxSize: Int) = {
if (lgMaxSize <= pmpGranularity.log2) {
x < comparand
} else {
// break up the circuit; the MSB part will be CSE'd
val msbsLess = (x >> lgMaxSize) < (comparand >> lgMaxSize)
val msbsEqual = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U
val lsbsLess = (x(lgMaxSize-1, 0) | lsbMask) < comparand(lgMaxSize-1, 0)
msbsLess || (msbsEqual && lsbsLess)
}
}
private def lowerBoundMatch(x: UInt, lgSize: UInt, lgMaxSize: Int) =
!boundMatch(x, UIntToOH1(lgSize, lgMaxSize), lgMaxSize)
private def upperBoundMatch(x: UInt, lgMaxSize: Int) =
boundMatch(x, 0.U, lgMaxSize)
private def rangeMatch(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP) =
prev.lowerBoundMatch(x, lgSize, lgMaxSize) && upperBoundMatch(x, lgMaxSize)
private def pow2Homogeneous(x: UInt, pgLevel: UInt) = {
val maskHomogeneous = pgLevelMap { idxBits => if (idxBits > paddrBits) false.B else mask(idxBits - 1) } (pgLevel)
maskHomogeneous || (pgLevelMap { idxBits => ((x ^ comparand) >> idxBits) =/= 0.U } (pgLevel))
}
private def pgLevelMap[T](f: Int => T) = (0 until pgLevels).map { i =>
f(pgIdxBits + (pgLevels - 1 - i) * pgLevelBits)
}
private def rangeHomogeneous(x: UInt, pgLevel: UInt, prev: PMP) = {
val beginsAfterLower = !(x < prev.comparand)
val beginsAfterUpper = !(x < comparand)
val pgMask = pgLevelMap { idxBits => (((BigInt(1) << paddrBits) - (BigInt(1) << idxBits)) max 0).U } (pgLevel)
val endsBeforeLower = (x & pgMask) < (prev.comparand & pgMask)
val endsBeforeUpper = (x & pgMask) < (comparand & pgMask)
endsBeforeLower || beginsAfterUpper || (beginsAfterLower && endsBeforeUpper)
}
// returns whether this PMP completely contains, or contains none of, a page
def homogeneous(x: UInt, pgLevel: UInt, prev: PMP): Bool =
Mux(napot, pow2Homogeneous(x, pgLevel), !torNotNAPOT || rangeHomogeneous(x, pgLevel, prev))
// returns whether this matching PMP fully contains the access
def aligned(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool = if (lgMaxSize <= pmpGranularity.log2) true.B else {
val lsbMask = UIntToOH1(lgSize, lgMaxSize)
val straddlesLowerBound = ((x >> lgMaxSize) ^ (prev.comparand >> lgMaxSize)) === 0.U && (prev.comparand(lgMaxSize-1, 0) & ~x(lgMaxSize-1, 0)) =/= 0.U
val straddlesUpperBound = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U && (comparand(lgMaxSize-1, 0) & (x(lgMaxSize-1, 0) | lsbMask)) =/= 0.U
val rangeAligned = !(straddlesLowerBound || straddlesUpperBound)
val pow2Aligned = (lsbMask & ~mask(lgMaxSize-1, 0)) === 0.U
Mux(napot, pow2Aligned, rangeAligned)
}
// returns whether this PMP matches at least one byte of the access
def hit(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool =
Mux(napot, pow2Match(x, lgSize, lgMaxSize), torNotNAPOT && rangeMatch(x, lgSize, lgMaxSize, prev))
}
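// A software-only sketch (ours, not part of rocket-chip) of the NAPOT decoding that
// computeMask above implements: a nonnegative pmpaddr of the form y...y011...1 with k
// trailing ones selects a naturally aligned region of 2^(lgAlign + k + 1) bytes.
object NapotRegionModel {
  def regionBytes(pmpaddr: BigInt, lgAlign: Int = PMP.lgAlign): BigInt = {
    var k = 0
    var a = pmpaddr
    while (a.testBit(0)) { k += 1; a >>= 1 }
    BigInt(1) << (lgAlign + k + 1)
  }
  // regionBytes(BigInt("11", 2)) == 32: two trailing ones encode a 32-byte NAPOT region
}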
class PMPHomogeneityChecker(pmps: Seq[PMP])(implicit p: Parameters) {
def apply(addr: UInt, pgLevel: UInt): Bool = {
pmps.foldLeft((true.B, 0.U.asTypeOf(new PMP))) { case ((h, prev), pmp) =>
(h && pmp.homogeneous(addr, pgLevel, prev), pmp)
}._1
}
}
class PMPChecker(lgMaxSize: Int)(implicit val p: Parameters) extends Module
with HasCoreParameters {
override def desiredName = s"PMPChecker_s${lgMaxSize}"
val io = IO(new Bundle {
val prv = Input(UInt(PRV.SZ.W))
val pmp = Input(Vec(nPMPs, new PMP))
val addr = Input(UInt(paddrBits.W))
val size = Input(UInt(log2Ceil(lgMaxSize + 1).W))
val r = Output(Bool())
val w = Output(Bool())
val x = Output(Bool())
})
val default = if (io.pmp.isEmpty) true.B else io.prv > PRV.S.U
val pmp0 = WireInit(0.U.asTypeOf(new PMP))
pmp0.cfg.r := default
pmp0.cfg.w := default
pmp0.cfg.x := default
val res = (io.pmp zip (pmp0 +: io.pmp)).reverse.foldLeft(pmp0) { case (prev, (pmp, prevPMP)) =>
val hit = pmp.hit(io.addr, io.size, lgMaxSize, prevPMP)
val ignore = default && !pmp.cfg.l
val aligned = pmp.aligned(io.addr, io.size, lgMaxSize, prevPMP)
for ((name, idx) <- Seq("no", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty)
property.cover(pmp.cfg.a === idx.U, s"The cfg access is set to ${name} access ", "Cover PMP access mode setting")
property.cover(pmp.cfg.l === 0x1.U, s"The cfg lock is set to high ", "Cover PMP lock mode setting")
// Not including Write and no Read permission as the combination is reserved
for ((name, idx) <- Seq("no", "RO", "", "RW", "X", "RX", "", "RWX").zipWithIndex; if name.nonEmpty)
property.cover((Cat(pmp.cfg.x, pmp.cfg.w, pmp.cfg.r) === idx.U), s"The permission is set to ${name} access ", "Cover PMP access permission setting")
for ((name, idx) <- Seq("", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty) {
property.cover(!ignore && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode ", "Cover PMP access")
property.cover(pmp.cfg.l && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode with lock bit high", "Cover PMP access with lock bit")
}
val cur = WireInit(pmp)
cur.cfg.r := aligned && (pmp.cfg.r || ignore)
cur.cfg.w := aligned && (pmp.cfg.w || ignore)
cur.cfg.x := aligned && (pmp.cfg.x || ignore)
Mux(hit, cur, prev)
}
io.r := res.cfg.r
io.w := res.cfg.w
io.x := res.cfg.x
}
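// A software-only sketch (ours, not part of rocket-chip) of the priority fold above: entries
// are searched from lowest to highest index, the first hit decides the permissions (with
// M-mode ignoring unlocked entries), and a miss falls back to the default.
object PmpPriorityModel {
  final case class Entry(hit: Boolean, aligned: Boolean, locked: Boolean,
                         r: Boolean, w: Boolean, x: Boolean)
  def check(entries: Seq[Entry], default: Boolean): (Boolean, Boolean, Boolean) =
    entries.find(_.hit) match {
      case Some(e) =>
        val ignore = default && !e.locked // mirrors `ignore` in the fold above
        (e.aligned && (e.r || ignore), e.aligned && (e.w || ignore), e.aligned && (e.x || ignore))
      case None => (default, default, default)
    }
}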
File Replacement.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import freechips.rocketchip.util.property.cover
abstract class ReplacementPolicy {
def nBits: Int
def perSet: Boolean
def way: UInt
def miss: Unit
def hit: Unit
def access(touch_way: UInt): Unit
def access(touch_ways: Seq[Valid[UInt]]): Unit
def state_read: UInt
def get_next_state(state: UInt, touch_way: UInt): UInt
def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = {
touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev))
}
def get_replace_way(state: UInt): UInt
}
object ReplacementPolicy {
def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match {
case "random" => new RandomReplacement(n_ways)
case "lru" => new TrueLRU(n_ways)
case "plru" => new PseudoLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
}
class RandomReplacement(n_ways: Int) extends ReplacementPolicy {
private val replace = Wire(Bool())
replace := false.B
def nBits = 16
def perSet = false
private val lfsr = LFSR(nBits, replace)
def state_read = WireDefault(lfsr)
def way = Random(n_ways, lfsr)
def miss = replace := true.B
def hit = {}
def access(touch_way: UInt) = {}
def access(touch_ways: Seq[Valid[UInt]]) = {}
def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare
def get_replace_way(state: UInt) = way
}
abstract class SeqReplacementPolicy {
def access(set: UInt): Unit
def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit
def way: UInt
}
abstract class SetAssocReplacementPolicy {
def access(set: UInt, touch_way: UInt): Unit
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit
def way(set: UInt): UInt
}
class SeqRandom(n_ways: Int) extends SeqReplacementPolicy {
val logic = new RandomReplacement(n_ways)
def access(set: UInt) = { }
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
when (valid && !hit) { logic.miss }
}
def way = logic.way
}
class TrueLRU(n_ways: Int) extends ReplacementPolicy {
// True LRU replacement policy, using a triangular matrix to track which ways are more recently used than others.
// The matrix is packed into a single UInt (or Bits). Example 4-way (6-bits):
// [5] - 3 more recent than 2
// [4] - 3 more recent than 1
// [3] - 2 more recent than 1
// [2] - 3 more recent than 0
// [1] - 2 more recent than 0
// [0] - 1 more recent than 0
def nBits = (n_ways * (n_ways-1)) / 2
def perSet = true
private val state_reg = RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
private def extractMRUVec(state: UInt): Seq[UInt] = {
// Extract per-way information about which higher-indexed ways are more recently used
val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W)))
var lsb = 0
for (i <- 0 until n_ways-1) {
moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W))
lsb = lsb + (n_ways - i - 1)
}
moreRecentVec
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W)))
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
val wayDec = UIntToOH(touch_way, n_ways)
// Compute next value of triangular matrix
// set the touched way as more recent than every other way
nextState.zipWithIndex.map { case (e, i) =>
e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec)
}
nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1
}
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous")
}
}
def get_replace_way(state: UInt): UInt = {
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
// For each way, determine if all other ways are more recent
val mruWayDec = (0 until n_ways).map { i =>
val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR)
val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _))
upperMoreRecent && lowerMoreRecent
}
OHToUInt(mruWayDec)
}
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
@deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05")
def replace: UInt = way
}
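// A software-only model (ours, not part of rocket-chip) of the triangular-matrix bookkeeping
// documented above, assuming a fully populated state map: state((i, j)), for j > i, records
// that way j is more recent than way i; touching a way marks it more recent than every other
// way, and the victim is the way that all other ways are more recent than.
object TrueLRUModel {
  type State = Map[(Int, Int), Boolean] // (i, j) with j > i -> "way j is more recent than way i"
  def touch(state: State, way: Int, nWays: Int): State =
    (for (i <- 0 until nWays; j <- i + 1 until nWays) yield {
      (i, j) -> (if (j == way) true else if (i == way) false else state((i, j)))
    }).toMap
  def victim(state: State, nWays: Int): Int =
    (0 until nWays).indexWhere { w =>
      (0 until nWays).forall(o => o == w || (if (o > w) state((w, o)) else !state((o, w))))
    }
}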
class PseudoLRU(n_ways: Int) extends ReplacementPolicy {
// Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU
//
//
// - bits storage example for 4-way PLRU binary tree:
// bit[2]: ways 3+2 older than ways 1+0
// / \
// bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0
//
//
// - bits storage example for 3-way PLRU binary tree:
// bit[1]: way 2 older than ways 1+0
// \
// bit[0]: way 1 older than way 0
//
//
// - bits storage example for 8-way PLRU binary tree:
// bit[6]: ways 7-4 older than ways 3-0
// / \
// bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0
// / \ / \
// bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0
def nBits = n_ways - 1
def perSet = true
private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i simultaneous")
}
}
/** @param state state_reg bits for this sub-tree
* @param touch_way touched way encoded value bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways")
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val set_left_older = !touch_way(log2Ceil(tree_nways)-1)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(set_left_older,
Mux(set_left_older,
left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree
get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(set_left_older,
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value
!touch_way(0)
} else { // tree_nways <= 1
// we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires)
0.U(1.W)
}
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways))
else touch_way.extract(log2Ceil(n_ways)-1,0)
get_next_state(state, touch_way_sized, n_ways)
}
/** @param state state_reg bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_replace_way(state: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
// this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val left_subtree_older = state(tree_nways-2)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right
get_replace_way(left_subtree_state, left_nways), // recurse left
get_replace_way(right_subtree_state, right_nways))) // recurse right
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right
0.U(1.W),
get_replace_way(right_subtree_state, right_nways))) // recurse right
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value
state(0)
} else { // tree_nways <= 1
// we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value
0.U(1.W)
}
}
def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways)
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
}
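// A software-only model (ours, not part of rocket-chip) of get_replace_way above: descend
// the tree, following the sub-tree that the current state bit marks as older, and emit one
// bit of the victim's way index per level (the left sub-tree holds the higher-indexed ways).
object PlruSoftwareModel {
  def replaceWay(state: Int, nWays: Int): Int =
    if (nWays < 2) 0
    else if (nWays == 2) state & 1
    else {
      val rightWays  = 1 << (log2Ceil(nWays) - 1)
      val leftWays   = nWays - rightWays
      val leftOlder  = ((state >> (nWays - 2)) & 1) == 1
      val leftState  = (state >> (rightWays - 1)) & ((1 << (leftWays - 1)) - 1)
      val rightState = state & ((1 << (rightWays - 1)) - 1)
      if (leftOlder) rightWays + replaceWay(leftState, leftWays) else replaceWay(rightState, rightWays)
    }
  // replaceWay(4, 4) == 2 and replaceWay(6, 4) == 3, matching the PLRUTest assertions below
}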
class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy {
val logic = new PseudoLRU(n_ways)
val state = SyncReadMem(n_sets, UInt(logic.nBits.W))
val current_state = Wire(UInt(logic.nBits.W))
val next_state = Wire(UInt(logic.nBits.W))
val plru_way = logic.get_replace_way(current_state)
def access(set: UInt) = {
current_state := state.read(set)
}
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
val update_way = Mux(hit, way, plru_way)
next_state := logic.get_next_state(current_state, update_way)
when (valid) { state.write(set, next_state) }
}
def way = plru_way
}
class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy {
val logic = policy.toLowerCase match {
case "plru" => new PseudoLRU(n_ways)
case "lru" => new TrueLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
val state_vec =
if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line
else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W))))
def access(set: UInt, touch_way: UInt) = {
state_vec(set) := logic.get_next_state(state_vec(set), touch_way)
}
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = {
require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways")
for (set <- 0 until n_sets) {
val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) =>
Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)}
when (set_touch_ways.map(_.valid).orR) {
state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways)
}
}
}
def way(set: UInt) = logic.get_replace_way(state_vec(set))
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) {
val plru = new PseudoLRU(n_ways)
// step
io.finished := RegNext(true.B, false.B)
val get_replace_ways = (0 until (1 << (n_ways-1))).map(state =>
plru.get_replace_way(state = state.U((n_ways-1).W)))
val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way =>
plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W))))
n_ways match {
case 2 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1))
assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1))
}
case 3 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3))
assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2))
assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2))
assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2))
assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2))
}
case 4 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3))
assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4))
assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5))
assert(get_replace_ways(6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6))
assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7))
assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2))
assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3))
assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2))
assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3))
assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2))
assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3))
assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2))
assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3))
assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0))
assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1))
assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2))
assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3))
assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0))
assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1))
assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2))
assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3))
assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0))
assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 way=1: expected=6 actual=%d", get_next_states(6)(1))
assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2))
assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3))
assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0))
assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=1: expected=6 actual=%d", get_next_states(7)(1))
assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2))
assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3))
}
case 5 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15))
assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0))
assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1))
assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2))
assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3))
assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: expected=00 actual=%d", get_next_states( 0)(4))
assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0))
assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1))
assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2))
assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3))
assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4))
assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0))
assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1))
assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2))
assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3))
assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4))
assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0))
assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1))
assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2))
assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3))
assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4))
assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0))
assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1))
assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2))
assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3))
assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4))
assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0))
assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1))
assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2))
assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3))
assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4))
assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0))
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1))
assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2))
assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3))
assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4))
assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0))
assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=1: expected=14 actual=%d", get_next_states( 7)(1))
assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2))
assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3))
assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4))
assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0))
assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1))
assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2))
assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3))
assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4))
assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0))
assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1))
assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2))
assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3))
assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4))
assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0))
assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1))
assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2))
assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3))
assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4))
assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0))
assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1))
assert(get_next_states(11)(2) === 11.U(plru.nBits.W), s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2))
assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3))
assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4))
assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0))
assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1))
assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2))
assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3))
assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4))
assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0))
assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1))
assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2))
assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3))
assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4))
assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0))
assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1))
assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2))
assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3))
assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4))
assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0))
assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=1: expected=14 actual=%d", get_next_states(15)(1))
assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2))
assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3))
assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4))
}
case 6 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15))
assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16))
assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17))
assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18))
assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19))
assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20))
assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21))
assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22))
assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23))
assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24))
assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25))
assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26))
assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27))
assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28))
assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29))
assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30))
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31))
}
case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways")
}
}
File TLBPermissions.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes, RegionType, AddressDecoder}
import freechips.rocketchip.tilelink.TLManagerParameters
case class TLBPermissions(
homogeneous: Bool, // if false, the below are undefined
r: Bool, // readable
w: Bool, // writeable
x: Bool, // executable
c: Bool, // cacheable
a: Bool, // arithmetic ops
l: Bool) // logical ops
object TLBPageLookup
{
private case class TLBFixedPermissions(
e: Boolean, // get-/put-effects
r: Boolean, // readable
w: Boolean, // writeable
x: Boolean, // executable
c: Boolean, // cacheable
a: Boolean, // arithmetic ops
l: Boolean) { // logical ops
val useful = r || w || x || c || a || l
}
private def groupRegions(managers: Seq[TLManagerParameters]): Map[TLBFixedPermissions, Seq[AddressSet]] = {
val permissions = managers.map { m =>
(m.address, TLBFixedPermissions(
e = Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains m.regionType,
r = m.supportsGet || m.supportsAcquireB, // if cached, never uses Get
w = m.supportsPutFull || m.supportsAcquireT, // if cached, never uses Put
x = m.executable,
c = m.supportsAcquireB,
a = m.supportsArithmetic,
l = m.supportsLogical))
}
permissions
.filter(_._2.useful) // get rid of no-permission devices
.groupBy(_._2) // group by permission type
.mapValues(seq =>
AddressSet.unify(seq.flatMap(_._1))) // coalesce same-permission regions
.toMap
}
// Unmapped memory is considered to be inhomogeneous
def apply(managers: Seq[TLManagerParameters], xLen: Int, cacheBlockBytes: Int, pageSize: BigInt, maxRequestBytes: Int): UInt => TLBPermissions = {
require (isPow2(xLen) && xLen >= 8)
require (isPow2(cacheBlockBytes) && cacheBlockBytes >= xLen/8)
require (isPow2(pageSize) && pageSize >= cacheBlockBytes)
val xferSizes = TransferSizes(cacheBlockBytes, cacheBlockBytes)
val allSizes = TransferSizes(1, maxRequestBytes)
val amoSizes = TransferSizes(4, xLen/8)
managers.foreach { m =>
require (!m.supportsGet || m.supportsGet .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsGet} Get, but must support ${allSizes}")
require (!m.supportsPutFull || m.supportsPutFull .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutFull} PutFull, but must support ${allSizes}")
require (!m.supportsPutPartial || m.supportsPutPartial.contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutPartial} PutPartial, but must support ${allSizes}")
require (!m.supportsAcquireB || m.supportsAcquireB .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireB} AcquireB, but must support ${xferSizes}")
require (!m.supportsAcquireT || m.supportsAcquireT .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireT} AcquireT, but must support ${xferSizes}")
require (!m.supportsLogical || m.supportsLogical .contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsLogical} Logical, but must support ${amoSizes}")
require (!m.supportsArithmetic || m.supportsArithmetic.contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsArithmetic} Arithmetic, but must support ${amoSizes}")
require (!(m.supportsAcquireB && m.supportsPutFull && !m.supportsAcquireT), s"Memory region '${m.name}' supports AcquireB (cached read) and PutFull (un-cached write) but not AcquireT (cached write)")
}
val grouped = groupRegions(managers)
.mapValues(_.filter(_.alignment >= pageSize)) // discard any region that's not big enough
def lowCostProperty(prop: TLBFixedPermissions => Boolean): UInt => Bool = {
val (yesm, nom) = grouped.partition { case (k, eq) => prop(k) }
val (yes, no) = (yesm.values.flatten.toList, nom.values.flatten.toList)
// Find the minimal bits needed to distinguish between yes and no
val decisionMask = AddressDecoder(Seq(yes, no))
def simplify(x: Seq[AddressSet]) = AddressSet.unify(x.map(_.widen(~decisionMask)).distinct)
val (yesf, nof) = (simplify(yes), simplify(no))
if (yesf.size < no.size) {
(x: UInt) => yesf.map(_.contains(x)).foldLeft(false.B)(_ || _)
} else {
(x: UInt) => !nof.map(_.contains(x)).foldLeft(false.B)(_ || _)
}
}
// Derive simplified property circuits (don't care when !homo)
val rfn = lowCostProperty(_.r)
val wfn = lowCostProperty(_.w)
val xfn = lowCostProperty(_.x)
val cfn = lowCostProperty(_.c)
val afn = lowCostProperty(_.a)
val lfn = lowCostProperty(_.l)
val homo = AddressSet.unify(grouped.values.flatten.toList)
(x: UInt) => TLBPermissions(
homogeneous = homo.map(_.contains(x)).foldLeft(false.B)(_ || _),
r = rfn(x),
w = wfn(x),
x = xfn(x),
c = cfn(x),
a = afn(x),
l = lfn(x))
}
// Are all pageSize intervals of mapped regions homogeneous?
def homogeneous(managers: Seq[TLManagerParameters], pageSize: BigInt): Boolean = {
groupRegions(managers).values.forall(_.forall(_.alignment >= pageSize))
}
}
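// Hypothetical usage sketch (not part of the original file): the apply method
// above is an elaboration-time factory. It is built from the TileLink managers
// visible on an edge and then applied to a physical address wire, as the PTW
// below does once per page level. The identifiers (edge, xLen, cacheBlockBytes,
// pgSize, paddr) come from the caller's context and are assumptions here.
//
// val perms = TLBPageLookup(edge.manager.managers, xLen, cacheBlockBytes, pgSize, xLen/8)(paddr)
// // perms.r/w/x/c/a/l are only meaningful when perms.homogeneous is true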
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
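// Elaboration-time sketch (not part of the original file): IdRange(4, 8)
// covers ids 4..7. For the hardware contains(x: UInt) check above,
// start ^ (end-1) = 4 ^ 7 = 3, so largestDeltaBit = 1 and smallestCommonBit = 2:
// the circuit compares the prefix (x >> 2) === 1.U and range-checks only the
// low two bits, which constant propagation can often remove entirely.
object IdRangeExample {
  val r = IdRange(4, 8)
  require(r.contains(5) && !r.contains(8) && r.size == 4 && !r.isEmpty)
}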
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
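// Elaboration-time sketch (not part of the original file): intersect keeps
// only the sizes both endpoints support, while mincover (as noted above) is
// not a union and may admit sizes supported by neither term.
object TransferSizesExample {
  require(TransferSizes(4, 64).intersect(TransferSizes(16, 256)) == TransferSizes(16, 64))
  require(TransferSizes(4, 4).mincover(TransferSizes(16, 16)) == TransferSizes(4, 16)) // 8 is covered by neither term
}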
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
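// Elaboration-time sketch (not part of the original file): subtracting the
// first 256 bytes from a 4 KiB region leaves 0x1100-0x1fff, expressed as the
// aligned power-of-two fragments AddressSet(0x1100, 0xff),
// AddressSet(0x1200, 0x1ff), AddressSet(0x1400, 0x3ff), AddressSet(0x1800, 0x7ff);
// unify recombines those fragments and the hole back into the original set.
object AddressSetSubtractExample {
  val whole = AddressSet(0x1000, 0xfff) // 0x1000-0x1fff
  val hole  = AddressSet(0x1000, 0xff)  // 0x1000-0x10ff
  val rest  = whole.subtract(hole)
  require(AddressSet.unify(hole +: rest) == Seq(whole))
}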
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
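// Hypothetical usage sketch (not part of the original file): inside a module
// with a DecoupledIO stream 'in', a BufferParams value decides how (and
// whether) the stream is staged. 'in' is an assumed port name for
// illustration only.
//
// val staged      = BufferParams.default(in) // 2-entry Queue
// val direct      = BufferParams.none(in)    // no hardware inserted, returns 'in'
// val fallthrough = BufferParams.flow(in)    // 1-entry Queue with flow = true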
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File PTW.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch}
import chisel3.withClock
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
import scala.collection.mutable.ListBuffer
/** PTE request from TLB to PTW
*
* The TLB sends a PTE request to the PTW on an L1 TLB miss
*/
class PTWReq(implicit p: Parameters) extends CoreBundle()(p) {
val addr = UInt(vpnBits.W)
val need_gpa = Bool()
val vstage1 = Bool()
val stage2 = Bool()
}
/** PTE info from L2TLB to TLB
*
* contains the target PTE, exception flags, and two-stage translation info
*/
class PTWResp(implicit p: Parameters) extends CoreBundle()(p) {
/** ptw access exception */
val ae_ptw = Bool()
/** final access exception */
val ae_final = Bool()
/** page fault */
val pf = Bool()
/** guest page fault */
val gf = Bool()
/** hypervisor read */
val hr = Bool()
/** hypervisor write */
val hw = Bool()
/** hypervisor execute */
val hx = Bool()
/** PTE to refill L1TLB
*
* source: L2TLB
*/
val pte = new PTE
/** pte pglevel */
val level = UInt(log2Ceil(pgLevels).W)
/** fragmented_superpage support */
val fragmented_superpage = Bool()
/** homogeneous for both pma and pmp */
val homogeneous = Bool()
val gpa = Valid(UInt(vaddrBits.W))
val gpa_is_pte = Bool()
}
/** IO between TLB and PTW
*
* PTW receives :
* - PTE request
* - CSRs info
* - pmp results from PMP(in TLB)
*/
class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val req = Decoupled(Valid(new PTWReq))
val resp = Flipped(Valid(new PTWResp))
val ptbr = Input(new PTBR())
val hgatp = Input(new PTBR())
val vsatp = Input(new PTBR())
val status = Input(new MStatus())
val hstatus = Input(new HStatus())
val gstatus = Input(new MStatus())
val pmp = Input(Vec(nPMPs, new PMP))
val customCSRs = Flipped(coreParams.customCSRs)
}
/** PTW performance statistics */
class PTWPerfEvents extends Bundle {
val l2miss = Bool()
val l2hit = Bool()
val pte_miss = Bool()
val pte_hit = Bool()
}
/** Datapath IO between PTW and Core
*
* PTW receives CSRs info, pmp checks, sfence instruction info
*
* PTW sends its performance statistics to core
*/
class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val ptbr = Input(new PTBR())
val hgatp = Input(new PTBR())
val vsatp = Input(new PTBR())
val sfence = Flipped(Valid(new SFenceReq))
val status = Input(new MStatus())
val hstatus = Input(new HStatus())
val gstatus = Input(new MStatus())
val pmp = Input(Vec(nPMPs, new PMP))
val perf = Output(new PTWPerfEvents())
val customCSRs = Flipped(coreParams.customCSRs)
/** enable clock generated by ptw */
val clock_enabled = Output(Bool())
}
/** PTE template for transmission
*
* contains useful methods to check PTE attributes
* @see RV-priv spec 4.3.1 for page table entry format
*/
class PTE(implicit p: Parameters) extends CoreBundle()(p) {
val reserved_for_future = UInt(10.W)
val ppn = UInt(44.W)
val reserved_for_software = Bits(2.W)
/** dirty bit */
val d = Bool()
/** access bit */
val a = Bool()
/** global mapping */
val g = Bool()
/** user mode accessible */
val u = Bool()
/** whether the page is executable */
val x = Bool()
/** whether the page is writable */
val w = Bool()
/** whether the page is readable */
val r = Bool()
/** valid bit */
val v = Bool()
/** return true if this PTE is a pointer to the next level page table */
def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U
/** return true if this is a leaf PTE */
def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a
/** user read */
def ur(dummy: Int = 0) = sr() && u
/** user write*/
def uw(dummy: Int = 0) = sw() && u
/** user execute */
def ux(dummy: Int = 0) = sx() && u
/** supervisor read */
def sr(dummy: Int = 0) = leaf() && r
/** supervisor write */
def sw(dummy: Int = 0) = leaf() && w && d
/** supervisor execute */
def sx(dummy: Int = 0) = leaf() && x
/** full permission: writable and executable in user mode */
def isFullPerm(dummy: Int = 0) = uw() && ux()
}
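// Worked example (not part of the original file): consider a leaf PTE with
// v=1, r=1, w=1, a=1, u=1 but d=0. leaf() holds, yet sw() and uw() are false
// because supervisor/user writes additionally require the dirty bit, so a
// store through this mapping must first fault (or have hardware set d) before
// the TLB will report it writable.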
/** L2TLB PTE template
*
* contains tag bits
* @param nSets number of sets in L2TLB
* @see RV-priv spec 4.3.1 for page table entry format
*/
class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val idxBits = log2Ceil(nSets)
val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0)
val tag = UInt(tagBits.W)
val ppn = UInt(ppnBits.W)
/** dirty bit */
val d = Bool()
/** access bit */
val a = Bool()
/** user mode accessible */
val u = Bool()
/** whether the page is executable */
val x = Bool()
/** whether the page is writable */
val w = Bool()
/** whether the page is readable */
val r = Bool()
}
/** PTW contains the L2 TLB, performs page table walks on behalf of the higher-level TLBs, and serves queries from the L1 TLBs (I$, D$, RoCC)
*
* It performs hierarchical page table queries to memory for the desired leaf PTE and caches it in the l2tlb.
* Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accelerate the walk.
*
* ==Structure==
* - l2tlb : for leaf PTEs
* - set-associative (configurable with [[CoreParams.nL2TLBEntries]] and [[CoreParams.nL2TLBWays]])
* - PLRU
* - pte_cache: for non-leaf PTEs
* - set-associative
* - LRU
* - s2_pte_cache: for non-leaf PTEs in 2-stage translation
* - set-associative
* - PLRU
*
* l2tlb Pipeline: 3 stage
* {{{
* stage 0 : read
* stage 1 : decode
* stage 2 : hit check
* }}}
* ==State Machine==
* s_ready: ready to receive a request from the TLB
* s_req: issue the mem request; check for a pte_cache hit
* s_wait1: handle l2tlb errors
* s_wait2: final hit check
* s_wait3: receive mem response
* s_fragment_superpage: for superpage PTE
*
* @note l2tlb hit happens in s_req or s_wait1
* @see RV-priv spec 4.3-4.6 for Virtual-Memory System
* @see RV-priv spec 8.5 for Two-Stage Address Translation
* @todo details in two-stage translation
*/
class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) {
val io = IO(new Bundle {
/** to n TLB */
val requestor = Flipped(Vec(n, new TLBPTWIO))
/** to HellaCache */
val mem = new HellaCacheIO
/** to Core
*
* contains CSRs info and performance statistics
*/
val dpath = new DatapathPTWIO
})
val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8)
val state = RegInit(s_ready)
val l2_refill_wire = Wire(Bool())
/** Arbiter to arbitrate requests from the n TLBs */
val arb = Module(new Arbiter(Valid(new PTWReq), n))
// use the TLB requests as the arbiter's inputs
arb.io.in <> io.requestor.map(_.req)
// receive req only when s_ready and not in refill
arb.io.out.ready := (state === s_ready) && !l2_refill_wire
val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B)))
val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || io.dpath.customCSRs.disableDCacheClockGate
io.dpath.clock_enabled := usingVM.B && clock_en
val gated_clock =
if (!usingVM || !tileParams.dcache.get.clockGate) clock
else ClockGate(clock, clock_en, "ptw_clock_gate")
withClock (gated_clock) { // entering gated-clock domain
val invalidated = Reg(Bool())
/** current PTE level
* {{{
* 0 <= count <= pgLevel-1
* count = pgLevel - 1 : leaf PTE
* count < pgLevel - 1 : non-leaf PTE
* }}}
*/
val count = Reg(UInt(log2Ceil(pgLevels).W))
val resp_ae_ptw = Reg(Bool())
val resp_ae_final = Reg(Bool())
val resp_pf = Reg(Bool())
val resp_gf = Reg(Bool())
val resp_hr = Reg(Bool())
val resp_hw = Reg(Bool())
val resp_hx = Reg(Bool())
val resp_fragmented_superpage = Reg(Bool())
/** tlb request */
val r_req = Reg(new PTWReq)
/** index of the arbiter input (requestor) currently being served */
val r_req_dest = Reg(Bits())
// r_pte holds the PTE used to respond to the L1 TLB on an l2_hit
// and to construct mem.req.addr during the walk
val r_pte = Reg(new PTE)
val r_hgatp = Reg(new PTBR)
// 2-stage pageLevel
val aux_count = Reg(UInt(log2Ceil(pgLevels).W))
/** pte for 2-stage translation */
val aux_pte = Reg(new PTE)
val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case
val stage2 = Reg(Bool())
val stage2_final = Reg(Bool())
val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr)
val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels
/** 2-stage translation both enable */
val do_both_stages = r_req.vstage1 && r_req.stage2
val max_count = count max aux_count
val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr)
val mem_resp_valid = RegNext(io.mem.resp.valid)
val mem_resp_data = RegNext(io.mem.resp.bits.data)
io.mem.uncached_resp.map { resp =>
assert(!(resp.valid && io.mem.resp.valid))
resp.ready := true.B
when (resp.valid) {
mem_resp_valid := true.B
mem_resp_data := resp.bits.data
}
}
// construct pte from mem.resp
val (pte, invalid_paddr, invalid_gpa) = {
val tmp = mem_resp_data.asTypeOf(new PTE())
val res = WireDefault(tmp)
res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0))
when (tmp.r || tmp.w || tmp.x) {
// for superpage mappings, make sure PPN LSBs are zero
for (i <- 0 until pgLevels-1)
when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B }
}
(res,
Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U),
do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn))
}
// a valid non-leaf PTE was found, so the walk must traverse to the next level
val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U
/** address sent to mem for the page-table query */
val pte_addr = if (!usingVM) 0.U else {
val vpn_idxs = (0 until pgLevels).map { i =>
val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0)
(vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0)
}
val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U)
val vpn_idx = vpn_idxs(count) & mask
val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8)
val size = if (usingHypervisor) vaddrBits else paddrBits
//use r_pte.ppn as page table base address
//use vpn slice as offset
raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0)
}
/** stage2_pte_cache input addr */
val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else {
val vpn_idxs = (0 until pgLevels - 1).map { i =>
(r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0)
}
val vpn_idx = vpn_idxs(aux_count)
val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8)
raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0)
}
def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = {
(pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i)))
}
/** PTECache caches non-leaf PTE
* @param s2 true: 2-stage address translation
*/
def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) {
(false.B, 0.U)
} else {
val plru = new PseudoLRU(coreParams.nPTECacheEntries)
val valid = RegInit(0.U(coreParams.nPTECacheEntries.W))
val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W)))
// does not store the full PTE, only the PPN
val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W)))
val can_hit =
if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final
else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2)
val can_refill =
if (s2) do_both_stages && !stage2 && !stage2_final
else can_hit
val tag =
if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits))
else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits))
val hits = tags.map(_ === tag).asUInt & valid
val hit = hits.orR && can_hit
// refill with mem response
when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) {
val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid))
valid := valid | UIntToOH(r)
tags(r) := tag
data(r) := pte.ppn
plru.access(r)
}
// update the replacement state on a pte_cache hit
when (hit && state === s_req) { plru.access(OHToUInt(hits)) }
when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U }
val lcount = if (s2) aux_count else count
for (i <- 0 until pgLevels-1) {
ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i")
}
(hit, Mux1H(hits, data))
}
// generate pte_cache
val (pte_cache_hit, pte_cache_data) = makePTECache(false)
// generate pte_cache with 2-stage translation
val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true)
// pte_cache hit or 2-stage pte_cache hit
val pte_hit = RegNext(false.B)
io.dpath.perf.pte_miss := false.B
io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit
assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)),
"PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event")
// l2_refill happens when the leaf PTE is found
val l2_refill = RegNext(false.B)
l2_refill_wire := l2_refill
io.dpath.perf.l2miss := false.B
io.dpath.perf.l2hit := false.B
// l2tlb
val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else {
val code = new ParityCode
require(isPow2(coreParams.nL2TLBEntries))
require(isPow2(coreParams.nL2TLBWays))
require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays)
val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays
require(isPow2(nL2TLBSets))
val idxBits = log2Ceil(nL2TLBSets)
val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru")
val ram = DescribedSRAM(
name = "l2_tlb_ram",
desc = "L2 TLB",
size = nL2TLBSets,
data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W))
)
val g = Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W)))
val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W))))
// use r_req to construct tag
val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits)
/** the valid vec for the selected set(including n ways) */
val r_valid_vec = valid.map(_(r_idx)).asUInt
val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W))
val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W))
r_valid_vec_q := r_valid_vec
// replacement way
r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U)
// refill with r_pte(leaf pte)
when (l2_refill && !invalidated) {
val entry = Wire(new L2TLBEntry(nL2TLBSets))
entry.ppn := r_pte.ppn
entry.d := r_pte.d
entry.a := r_pte.a
entry.u := r_pte.u
entry.x := r_pte.x
entry.w := r_pte.w
entry.r := r_pte.r
entry.tag := r_tag
// if all the ways are valid, use the PLRU to select the way to replace;
// otherwise use PriorityEncoderOH to pick an invalid way
val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W)
ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools)
val mask = UIntToOH(r_idx)
for (way <- 0 until coreParams.nL2TLBWays) {
when (wmask(way)) {
valid(way) := valid(way) | mask
g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask)
}
}
}
// sfence happens
when (io.dpath.sfence.valid) {
val hg = usingHypervisor.B && io.dpath.sfence.bits.hg
for (way <- 0 until coreParams.nL2TLBWays) {
valid(way) :=
Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)),
Mux(!hg && io.dpath.sfence.bits.rs2, valid(way) & g(way),
0.U))
}
}
val s0_valid = !l2_refill && arb.io.out.fire
val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa
val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid)
val s2_valid = RegNext(s1_valid)
    // read the L2 TLB set indexed by the incoming request's address
val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid)
val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid)))
val s2_valid_vec = RegEnable(r_valid_vec, s1_valid)
val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid)
val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR
when (s2_valid && s2_error) { valid.foreach { _ := 0.U }}
// decode
val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets)))
val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag))
val s2_hit = s2_valid && s2_hit_vec.orR
io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR)
io.dpath.perf.l2hit := s2_hit
when (s2_hit) {
l2_plru.access(r_idx, OHToUInt(s2_hit_vec))
assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit")
}
val s2_pte = Wire(new PTE)
val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec)
s2_pte.ppn := s2_hit_entry.ppn
s2_pte.d := s2_hit_entry.d
s2_pte.a := s2_hit_entry.a
s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec)
s2_pte.u := s2_hit_entry.u
s2_pte.x := s2_hit_entry.x
s2_pte.w := s2_hit_entry.w
s2_pte.r := s2_hit_entry.r
s2_pte.v := true.B
s2_pte.reserved_for_future := 0.U
s2_pte.reserved_for_software := 0.U
for (way <- 0 until coreParams.nL2TLBWays) {
ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way")
}
(s2_hit, s2_error, s2_pte, Some(ram))
}
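  // Summary of the L2 TLB lookup above (commentary only, no additional hardware):
  //   s0: when no refill is in flight and the request is suitable (s0_suitable),
  //       the arbitration winner enables the SRAM read
  //   s1: the parity-encoded ways come out of the SRAM and are captured with RegEnable
  //   s2: parity is decoded, tags are compared against r_tag, and Mux1H selects the
  //       hit entry; a detected parity error invalidates the entire array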
// if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk
invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready)
// mem request
io.mem.keep_clock_enabled := false.B
io.mem.req.valid := state === s_req || state === s_dummy1
io.mem.req.bits.phys := true.B
io.mem.req.bits.cmd := M_XRD
io.mem.req.bits.size := log2Ceil(xLen/8).U
io.mem.req.bits.signed := false.B
io.mem.req.bits.addr := pte_addr
io.mem.req.bits.idx.foreach(_ := pte_addr)
io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition
io.mem.req.bits.dv := do_both_stages && !stage2
io.mem.req.bits.tag := DontCare
io.mem.req.bits.no_resp := false.B
io.mem.req.bits.no_alloc := DontCare
io.mem.req.bits.no_xcpt := DontCare
io.mem.req.bits.data := DontCare
io.mem.req.bits.mask := DontCare
io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf
io.mem.s1_data := DontCare
io.mem.s2_kill := false.B
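  // Commentary: the D$ request is issued speculatively from s_req; io.mem.s1_kill cancels
  // it one cycle later if the L2 TLB hit, a guest-physical fault (resp_gf) was detected,
  // or the walker is no longer in s_wait1.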
val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits)
require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}")
val pmaPgLevelHomogeneous = (0 until pgLevels) map { i =>
val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits))
if (pageGranularityPMPs && i == pgLevels - 1) {
require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned")
true.B
} else {
TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous
}
}
val pmaHomogeneous = pmaPgLevelHomogeneous(count)
val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count)
val homogeneous = pmaHomogeneous && pmpHomogeneous
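  // Commentary: 'homogeneous' is true only when the resolved (super)page lies entirely
  // within a single PMA/PMP region; otherwise the walker reports a fragmented superpage
  // so the requesting TLB caches only the smallest page size (see s_fragment_superpage below).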
// response to tlb
for (i <- 0 until io.requestor.size) {
io.requestor(i).resp.valid := resp_valid(i)
io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw
io.requestor(i).resp.bits.ae_final := resp_ae_final
io.requestor(i).resp.bits.pf := resp_pf
io.requestor(i).resp.bits.gf := resp_gf
io.requestor(i).resp.bits.hr := resp_hr
io.requestor(i).resp.bits.hw := resp_hw
io.requestor(i).resp.bits.hx := resp_hx
io.requestor(i).resp.bits.pte := r_pte
io.requestor(i).resp.bits.level := max_count
io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B
io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B
io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa
io.requestor(i).resp.bits.gpa.bits :=
Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff)
io.requestor(i).resp.bits.gpa_is_pte := !stage2_final
io.requestor(i).ptbr := io.dpath.ptbr
io.requestor(i).hgatp := io.dpath.hgatp
io.requestor(i).vsatp := io.dpath.vsatp
io.requestor(i).customCSRs <> io.dpath.customCSRs
io.requestor(i).status := io.dpath.status
io.requestor(i).hstatus := io.dpath.hstatus
io.requestor(i).gstatus := io.dpath.gstatus
io.requestor(i).pmp := io.dpath.pmp
}
// control state machine
val next_state = WireDefault(state)
state := OptimizationBarrier(next_state)
val do_switch = WireDefault(false.B)
switch (state) {
is (s_ready) {
when (arb.io.out.fire) {
val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels
val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels
val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels
val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, arb.io.out.bits.bits.addr)
r_req := arb.io.out.bits.bits
r_req_dest := arb.io.chosen
next_state := Mux(arb.io.out.bits.valid, s_req, s_ready)
stage2 := arb.io.out.bits.bits.stage2
stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1
count := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count)
aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U)
aux_pte.ppn := aux_ppn
aux_pte.reserved_for_future := 0.U
resp_ae_ptw := false.B
resp_ae_final := false.B
resp_pf := false.B
resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2
resp_hr := true.B
resp_hw := true.B
resp_hx := true.B
resp_fragmented_superpage := false.B
r_hgatp := io.dpath.hgatp
assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2)
}
}
is (s_req) {
when(stage2 && count === r_hgatp_initial_count) {
gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr)
}
// pte_cache hit
when (stage2_pte_cache_hit) {
aux_count := aux_count + 1.U
aux_pte.ppn := stage2_pte_cache_data
aux_pte.reserved_for_future := 0.U
pte_hit := true.B
}.elsewhen (pte_cache_hit) {
count := count + 1.U
pte_hit := true.B
}.otherwise {
next_state := Mux(io.mem.req.ready, s_wait1, s_req)
}
when(resp_gf) {
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
}
is (s_wait1) {
      // This Mux is for the l2_error case; the l2_hit && !l2_error case is overridden below
next_state := Mux(l2_hit, s_req, s_wait2)
}
is (s_wait2) {
next_state := s_wait3
io.dpath.perf.pte_miss := count < (pgLevels-1).U
when (io.mem.s2_xcpt.ae.ld) {
resp_ae_ptw := true.B
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
}
is (s_fragment_superpage) {
next_state := s_ready
resp_valid(r_req_dest) := true.B
when (!homogeneous) {
count := (pgLevels-1).U
resp_fragmented_superpage := true.B
}
when (do_both_stages) {
resp_fragmented_superpage := true.B
}
}
}
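  // Commentary: merged_pte, built below, is used when a two-stage walk finishes its stage-2
  // leg: roughly, the upper PPN bits come from the PTE just returned from memory and the
  // bits below the superpage boundary come from aux_pte.ppn, with superpage_mask clearing
  // the low-order bits according to the final translation level.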
val merged_pte = {
val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U)
val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U))
val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn
val stage1_ppn = stage1_ppns(count)
makePTE(stage1_ppn & superpage_mask, aux_pte)
}
r_pte := OptimizationBarrier(
    // L2 TLB hit -> a leaf PTE (l2_pte) was found; respond to the L1 TLB
Mux(l2_hit && !l2_error && !resp_gf, l2_pte,
// S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp
Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte),
    // PTE cache hit -> a non-leaf PTE (pte_cache_data) was found; continue requesting memory
Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte),
// 2-stage translation
Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte),
    // when memory responds, store the returned PTE (or the merged two-stage PTE)
Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte),
// fragment_superpage
Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte),
    // when a TLB request arrives, start the walk from the root address in satp (or vsatp/hgatp)
Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)),
r_pte))))))))
when (l2_hit && !l2_error && !resp_gf) {
assert(state === s_req || state === s_wait1)
next_state := s_ready
resp_valid(r_req_dest) := true.B
count := (pgLevels-1).U
}
when (mem_resp_valid) {
assert(state === s_wait3)
next_state := s_req
when (traverse) {
when (do_both_stages && !stage2) { do_switch := true.B }
count := count + 1.U
}.otherwise {
val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa)
val ae = pte.v && invalid_paddr
val pf = pte.v && pte.reserved_for_future =/= 0.U
val success = pte.v && !ae && !pf && !gf
when (do_both_stages && !stage2_final && success) {
when (stage2) {
stage2 := false.B
count := aux_count
}.otherwise {
stage2_final := true.B
do_switch := true.B
}
}.otherwise {
        // a leaf PTE was found; start the L2 TLB refill
l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa &&
(!r_req.vstage1 && !r_req.stage2 ||
do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm())
count := max_count
when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) {
next_state := s_fragment_superpage
}.otherwise {
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table()
resp_ae_final := ae && pte.leaf()
resp_pf := pf && !stage2
resp_gf := gf || (pf && stage2)
resp_hr := !stage2 || (!pf && !gf && pte.ur())
resp_hw := !stage2 || (!pf && !gf && pte.uw())
resp_hx := !stage2 || (!pf && !gf && pte.ux())
}
}
}
when (io.mem.s2_nack) {
assert(state === s_wait2)
next_state := s_req
}
when (do_switch) {
aux_count := Mux(traverse, count + 1.U, count)
count := r_hgatp_initial_count
aux_pte := Mux(traverse, pte, {
val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn
makePTE(s1_ppns(count), pte)
})
stage2 := true.B
}
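  // Commentary: do_switch re-targets the walker at the G-stage page table: the current
  // stage-1 pointer (or partially formed guest-physical address) is parked in aux_pte,
  // count restarts at the hgatp root level, and stage2 is set so subsequent memory
  // accesses translate guest-physical addresses.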
for (i <- 0 until pgLevels) {
val leaf = mem_resp_valid && !traverse && count === i.U
ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i")
ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i")
ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i")
ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i")
ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i")
if (i != pgLevels-1)
ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i")
}
ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep")
ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access")
ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table")
} // leaving gated-clock domain
private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc)
  /** Replace PTE.ppn with ppn */
private def makePTE(ppn: UInt, default: PTE) = {
val pte = WireDefault(default)
pte.ppn := ppn
pte
}
/** use hgatp and vpn to construct a new ppn */
private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = {
val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits))
val lsbs = WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count))
val pte = WireDefault(default)
pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs)
pte
}
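  // Commentary: makeHypervisorRootPTE synthesizes the G-stage root "PTE" from hgatp: the PPN
  // keeps the upper bits of hgatp.ppn and substitutes the top slice of the guest-physical vpn
  // (selected by how many extra levels hgatp enables) into the low maxHypervisorExtraAddrBits bits.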
  /** use hgatp and vpn to check whether the GPA is out of range */
private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = {
val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits))
idxs.extract(count) =/= 0.U
}
}
/** Mix-ins for constructing tiles that might have a PTW */
trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile =>
val module: CanHavePTWModule
var nPTWPorts = 1
nDCachePorts += usingPTW.toInt
}
trait CanHavePTWModule extends HasHellaCacheModule {
val outer: CanHavePTW
val ptwPorts = ListBuffer(outer.dcache.module.io.ptw)
val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p))
ptw.io.mem <> DontCare
if (outer.usingPTW) {
dcachePorts += ptw.io.mem
}
}
| module PTW_4( // @[PTW.scala:219:7]
input clock, // @[PTW.scala:219:7]
input reset, // @[PTW.scala:219:7]
output io_requestor_0_req_ready, // @[PTW.scala:220:14]
input io_requestor_0_req_valid, // @[PTW.scala:220:14]
input [26:0] io_requestor_0_req_bits_bits_addr, // @[PTW.scala:220:14]
input io_requestor_0_req_bits_bits_need_gpa, // @[PTW.scala:220:14]
output io_requestor_0_resp_valid, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_ae_ptw, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_ae_final, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pf, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_gf, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_hr, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_hw, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_hx, // @[PTW.scala:220:14]
output [9:0] io_requestor_0_resp_bits_pte_reserved_for_future, // @[PTW.scala:220:14]
output [43:0] io_requestor_0_resp_bits_pte_ppn, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_resp_bits_pte_reserved_for_software, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_d, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_a, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_g, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_u, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_x, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_w, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_r, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_pte_v, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_resp_bits_level, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_homogeneous, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_gpa_valid, // @[PTW.scala:220:14]
output [38:0] io_requestor_0_resp_bits_gpa_bits, // @[PTW.scala:220:14]
output io_requestor_0_resp_bits_gpa_is_pte, // @[PTW.scala:220:14]
output [3:0] io_requestor_0_ptbr_mode, // @[PTW.scala:220:14]
output [43:0] io_requestor_0_ptbr_ppn, // @[PTW.scala:220:14]
output io_requestor_0_status_debug, // @[PTW.scala:220:14]
output io_requestor_0_status_cease, // @[PTW.scala:220:14]
output io_requestor_0_status_wfi, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_status_isa, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_status_dprv, // @[PTW.scala:220:14]
output io_requestor_0_status_dv, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_status_prv, // @[PTW.scala:220:14]
output io_requestor_0_status_v, // @[PTW.scala:220:14]
output io_requestor_0_status_sd, // @[PTW.scala:220:14]
output io_requestor_0_status_mpv, // @[PTW.scala:220:14]
output io_requestor_0_status_gva, // @[PTW.scala:220:14]
output io_requestor_0_status_tsr, // @[PTW.scala:220:14]
output io_requestor_0_status_tw, // @[PTW.scala:220:14]
output io_requestor_0_status_tvm, // @[PTW.scala:220:14]
output io_requestor_0_status_mxr, // @[PTW.scala:220:14]
output io_requestor_0_status_sum, // @[PTW.scala:220:14]
output io_requestor_0_status_mprv, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_status_fs, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_status_mpp, // @[PTW.scala:220:14]
output io_requestor_0_status_spp, // @[PTW.scala:220:14]
output io_requestor_0_status_mpie, // @[PTW.scala:220:14]
output io_requestor_0_status_spie, // @[PTW.scala:220:14]
output io_requestor_0_status_mie, // @[PTW.scala:220:14]
output io_requestor_0_status_sie, // @[PTW.scala:220:14]
output io_requestor_0_hstatus_spvp, // @[PTW.scala:220:14]
output io_requestor_0_hstatus_spv, // @[PTW.scala:220:14]
output io_requestor_0_hstatus_gva, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_debug, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_cease, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_wfi, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_gstatus_isa, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_gstatus_dprv, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_dv, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_gstatus_prv, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_v, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_sd, // @[PTW.scala:220:14]
output [22:0] io_requestor_0_gstatus_zero2, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_mpv, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_gva, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_mbe, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_sbe, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_gstatus_sxl, // @[PTW.scala:220:14]
output [7:0] io_requestor_0_gstatus_zero1, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_tsr, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_tw, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_tvm, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_mxr, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_sum, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_mprv, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_gstatus_fs, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_gstatus_mpp, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_gstatus_vs, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_spp, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_mpie, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_ube, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_spie, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_upie, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_mie, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_hie, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_sie, // @[PTW.scala:220:14]
output io_requestor_0_gstatus_uie, // @[PTW.scala:220:14]
output io_requestor_0_pmp_0_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_0_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_0_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_0_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_0_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_0_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_0_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_1_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_1_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_1_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_1_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_1_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_1_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_1_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_2_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_2_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_2_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_2_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_2_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_2_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_2_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_3_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_3_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_3_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_3_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_3_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_3_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_3_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_4_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_4_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_4_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_4_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_4_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_4_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_4_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_5_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_5_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_5_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_5_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_5_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_5_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_5_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_6_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_6_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_6_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_6_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_6_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_6_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_6_mask, // @[PTW.scala:220:14]
output io_requestor_0_pmp_7_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_0_pmp_7_cfg_a, // @[PTW.scala:220:14]
output io_requestor_0_pmp_7_cfg_x, // @[PTW.scala:220:14]
output io_requestor_0_pmp_7_cfg_w, // @[PTW.scala:220:14]
output io_requestor_0_pmp_7_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_0_pmp_7_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_0_pmp_7_mask, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_0_ren, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_0_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_0_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_0_value, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_1_ren, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_1_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_1_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_1_value, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_2_ren, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_2_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_2_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_2_value, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_3_ren, // @[PTW.scala:220:14]
output io_requestor_0_customCSRs_csrs_3_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_3_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_0_customCSRs_csrs_3_value, // @[PTW.scala:220:14]
output io_requestor_1_req_ready, // @[PTW.scala:220:14]
input io_requestor_1_req_valid, // @[PTW.scala:220:14]
input io_requestor_1_req_bits_valid, // @[PTW.scala:220:14]
input [26:0] io_requestor_1_req_bits_bits_addr, // @[PTW.scala:220:14]
input io_requestor_1_req_bits_bits_need_gpa, // @[PTW.scala:220:14]
output io_requestor_1_resp_valid, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_ae_ptw, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_ae_final, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pf, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_gf, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_hr, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_hw, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_hx, // @[PTW.scala:220:14]
output [9:0] io_requestor_1_resp_bits_pte_reserved_for_future, // @[PTW.scala:220:14]
output [43:0] io_requestor_1_resp_bits_pte_ppn, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_resp_bits_pte_reserved_for_software, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_d, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_a, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_g, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_u, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_x, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_w, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_r, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_pte_v, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_resp_bits_level, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_homogeneous, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_gpa_valid, // @[PTW.scala:220:14]
output [38:0] io_requestor_1_resp_bits_gpa_bits, // @[PTW.scala:220:14]
output io_requestor_1_resp_bits_gpa_is_pte, // @[PTW.scala:220:14]
output [3:0] io_requestor_1_ptbr_mode, // @[PTW.scala:220:14]
output [43:0] io_requestor_1_ptbr_ppn, // @[PTW.scala:220:14]
output io_requestor_1_status_debug, // @[PTW.scala:220:14]
output io_requestor_1_status_cease, // @[PTW.scala:220:14]
output io_requestor_1_status_wfi, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_status_isa, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_status_dprv, // @[PTW.scala:220:14]
output io_requestor_1_status_dv, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_status_prv, // @[PTW.scala:220:14]
output io_requestor_1_status_v, // @[PTW.scala:220:14]
output io_requestor_1_status_sd, // @[PTW.scala:220:14]
output io_requestor_1_status_mpv, // @[PTW.scala:220:14]
output io_requestor_1_status_gva, // @[PTW.scala:220:14]
output io_requestor_1_status_tsr, // @[PTW.scala:220:14]
output io_requestor_1_status_tw, // @[PTW.scala:220:14]
output io_requestor_1_status_tvm, // @[PTW.scala:220:14]
output io_requestor_1_status_mxr, // @[PTW.scala:220:14]
output io_requestor_1_status_sum, // @[PTW.scala:220:14]
output io_requestor_1_status_mprv, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_status_fs, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_status_mpp, // @[PTW.scala:220:14]
output io_requestor_1_status_spp, // @[PTW.scala:220:14]
output io_requestor_1_status_mpie, // @[PTW.scala:220:14]
output io_requestor_1_status_spie, // @[PTW.scala:220:14]
output io_requestor_1_status_mie, // @[PTW.scala:220:14]
output io_requestor_1_status_sie, // @[PTW.scala:220:14]
output io_requestor_1_hstatus_spvp, // @[PTW.scala:220:14]
output io_requestor_1_hstatus_spv, // @[PTW.scala:220:14]
output io_requestor_1_hstatus_gva, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_debug, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_cease, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_wfi, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_gstatus_isa, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_gstatus_dprv, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_dv, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_gstatus_prv, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_v, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_sd, // @[PTW.scala:220:14]
output [22:0] io_requestor_1_gstatus_zero2, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_mpv, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_gva, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_mbe, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_sbe, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_gstatus_sxl, // @[PTW.scala:220:14]
output [7:0] io_requestor_1_gstatus_zero1, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_tsr, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_tw, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_tvm, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_mxr, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_sum, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_mprv, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_gstatus_fs, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_gstatus_mpp, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_gstatus_vs, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_spp, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_mpie, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_ube, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_spie, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_upie, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_mie, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_hie, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_sie, // @[PTW.scala:220:14]
output io_requestor_1_gstatus_uie, // @[PTW.scala:220:14]
output io_requestor_1_pmp_0_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_0_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_0_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_0_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_0_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_0_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_0_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_1_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_1_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_1_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_1_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_1_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_1_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_1_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_2_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_2_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_2_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_2_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_2_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_2_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_2_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_3_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_3_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_3_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_3_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_3_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_3_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_3_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_4_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_4_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_4_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_4_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_4_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_4_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_4_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_5_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_5_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_5_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_5_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_5_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_5_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_5_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_6_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_6_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_6_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_6_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_6_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_6_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_6_mask, // @[PTW.scala:220:14]
output io_requestor_1_pmp_7_cfg_l, // @[PTW.scala:220:14]
output [1:0] io_requestor_1_pmp_7_cfg_a, // @[PTW.scala:220:14]
output io_requestor_1_pmp_7_cfg_x, // @[PTW.scala:220:14]
output io_requestor_1_pmp_7_cfg_w, // @[PTW.scala:220:14]
output io_requestor_1_pmp_7_cfg_r, // @[PTW.scala:220:14]
output [29:0] io_requestor_1_pmp_7_addr, // @[PTW.scala:220:14]
output [31:0] io_requestor_1_pmp_7_mask, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_0_ren, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_0_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_0_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_0_value, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_1_ren, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_1_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_1_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_1_value, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_2_ren, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_2_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_2_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_2_value, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_3_ren, // @[PTW.scala:220:14]
output io_requestor_1_customCSRs_csrs_3_wen, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_3_wdata, // @[PTW.scala:220:14]
output [63:0] io_requestor_1_customCSRs_csrs_3_value, // @[PTW.scala:220:14]
input io_mem_req_ready, // @[PTW.scala:220:14]
output io_mem_req_valid, // @[PTW.scala:220:14]
output [39:0] io_mem_req_bits_addr, // @[PTW.scala:220:14]
output io_mem_req_bits_dv, // @[PTW.scala:220:14]
output io_mem_s1_kill, // @[PTW.scala:220:14]
input io_mem_s2_nack, // @[PTW.scala:220:14]
input io_mem_s2_nack_cause_raw, // @[PTW.scala:220:14]
input io_mem_s2_uncached, // @[PTW.scala:220:14]
input [31:0] io_mem_s2_paddr, // @[PTW.scala:220:14]
input io_mem_resp_valid, // @[PTW.scala:220:14]
input [39:0] io_mem_resp_bits_addr, // @[PTW.scala:220:14]
input [6:0] io_mem_resp_bits_tag, // @[PTW.scala:220:14]
input [4:0] io_mem_resp_bits_cmd, // @[PTW.scala:220:14]
input [1:0] io_mem_resp_bits_size, // @[PTW.scala:220:14]
input io_mem_resp_bits_signed, // @[PTW.scala:220:14]
input [1:0] io_mem_resp_bits_dprv, // @[PTW.scala:220:14]
input io_mem_resp_bits_dv, // @[PTW.scala:220:14]
input [63:0] io_mem_resp_bits_data, // @[PTW.scala:220:14]
input [7:0] io_mem_resp_bits_mask, // @[PTW.scala:220:14]
input io_mem_resp_bits_replay, // @[PTW.scala:220:14]
input io_mem_resp_bits_has_data, // @[PTW.scala:220:14]
input [63:0] io_mem_resp_bits_data_word_bypass, // @[PTW.scala:220:14]
input [63:0] io_mem_resp_bits_data_raw, // @[PTW.scala:220:14]
input [63:0] io_mem_resp_bits_store_data, // @[PTW.scala:220:14]
input io_mem_replay_next, // @[PTW.scala:220:14]
input io_mem_s2_xcpt_ma_ld, // @[PTW.scala:220:14]
input io_mem_s2_xcpt_ma_st, // @[PTW.scala:220:14]
input io_mem_s2_xcpt_pf_ld, // @[PTW.scala:220:14]
input io_mem_s2_xcpt_pf_st, // @[PTW.scala:220:14]
input io_mem_s2_xcpt_ae_ld, // @[PTW.scala:220:14]
input io_mem_s2_xcpt_ae_st, // @[PTW.scala:220:14]
input [39:0] io_mem_s2_gpa, // @[PTW.scala:220:14]
input io_mem_ordered, // @[PTW.scala:220:14]
input io_mem_store_pending, // @[PTW.scala:220:14]
input io_mem_perf_acquire, // @[PTW.scala:220:14]
input io_mem_perf_release, // @[PTW.scala:220:14]
input io_mem_perf_grant, // @[PTW.scala:220:14]
input io_mem_perf_tlbMiss, // @[PTW.scala:220:14]
input io_mem_perf_blocked, // @[PTW.scala:220:14]
input io_mem_perf_canAcceptStoreThenLoad, // @[PTW.scala:220:14]
input io_mem_perf_canAcceptStoreThenRMW, // @[PTW.scala:220:14]
input io_mem_perf_canAcceptLoadThenLoad, // @[PTW.scala:220:14]
input io_mem_perf_storeBufferEmptyAfterLoad, // @[PTW.scala:220:14]
input io_mem_perf_storeBufferEmptyAfterStore, // @[PTW.scala:220:14]
input [3:0] io_dpath_ptbr_mode, // @[PTW.scala:220:14]
input [43:0] io_dpath_ptbr_ppn, // @[PTW.scala:220:14]
input io_dpath_sfence_valid, // @[PTW.scala:220:14]
input io_dpath_sfence_bits_rs1, // @[PTW.scala:220:14]
input io_dpath_sfence_bits_rs2, // @[PTW.scala:220:14]
input [38:0] io_dpath_sfence_bits_addr, // @[PTW.scala:220:14]
input io_dpath_sfence_bits_asid, // @[PTW.scala:220:14]
input io_dpath_sfence_bits_hv, // @[PTW.scala:220:14]
input io_dpath_sfence_bits_hg, // @[PTW.scala:220:14]
input io_dpath_status_debug, // @[PTW.scala:220:14]
input io_dpath_status_cease, // @[PTW.scala:220:14]
input io_dpath_status_wfi, // @[PTW.scala:220:14]
input [31:0] io_dpath_status_isa, // @[PTW.scala:220:14]
input [1:0] io_dpath_status_dprv, // @[PTW.scala:220:14]
input io_dpath_status_dv, // @[PTW.scala:220:14]
input [1:0] io_dpath_status_prv, // @[PTW.scala:220:14]
input io_dpath_status_v, // @[PTW.scala:220:14]
input io_dpath_status_sd, // @[PTW.scala:220:14]
input io_dpath_status_mpv, // @[PTW.scala:220:14]
input io_dpath_status_gva, // @[PTW.scala:220:14]
input io_dpath_status_tsr, // @[PTW.scala:220:14]
input io_dpath_status_tw, // @[PTW.scala:220:14]
input io_dpath_status_tvm, // @[PTW.scala:220:14]
input io_dpath_status_mxr, // @[PTW.scala:220:14]
input io_dpath_status_sum, // @[PTW.scala:220:14]
input io_dpath_status_mprv, // @[PTW.scala:220:14]
input [1:0] io_dpath_status_fs, // @[PTW.scala:220:14]
input [1:0] io_dpath_status_mpp, // @[PTW.scala:220:14]
input io_dpath_status_spp, // @[PTW.scala:220:14]
input io_dpath_status_mpie, // @[PTW.scala:220:14]
input io_dpath_status_spie, // @[PTW.scala:220:14]
input io_dpath_status_mie, // @[PTW.scala:220:14]
input io_dpath_status_sie, // @[PTW.scala:220:14]
input io_dpath_hstatus_spvp, // @[PTW.scala:220:14]
input io_dpath_hstatus_spv, // @[PTW.scala:220:14]
input io_dpath_hstatus_gva, // @[PTW.scala:220:14]
input io_dpath_gstatus_debug, // @[PTW.scala:220:14]
input io_dpath_gstatus_cease, // @[PTW.scala:220:14]
input io_dpath_gstatus_wfi, // @[PTW.scala:220:14]
input [31:0] io_dpath_gstatus_isa, // @[PTW.scala:220:14]
input [1:0] io_dpath_gstatus_dprv, // @[PTW.scala:220:14]
input io_dpath_gstatus_dv, // @[PTW.scala:220:14]
input [1:0] io_dpath_gstatus_prv, // @[PTW.scala:220:14]
input io_dpath_gstatus_v, // @[PTW.scala:220:14]
input io_dpath_gstatus_sd, // @[PTW.scala:220:14]
input [22:0] io_dpath_gstatus_zero2, // @[PTW.scala:220:14]
input io_dpath_gstatus_mpv, // @[PTW.scala:220:14]
input io_dpath_gstatus_gva, // @[PTW.scala:220:14]
input io_dpath_gstatus_mbe, // @[PTW.scala:220:14]
input io_dpath_gstatus_sbe, // @[PTW.scala:220:14]
input [1:0] io_dpath_gstatus_sxl, // @[PTW.scala:220:14]
input [7:0] io_dpath_gstatus_zero1, // @[PTW.scala:220:14]
input io_dpath_gstatus_tsr, // @[PTW.scala:220:14]
input io_dpath_gstatus_tw, // @[PTW.scala:220:14]
input io_dpath_gstatus_tvm, // @[PTW.scala:220:14]
input io_dpath_gstatus_mxr, // @[PTW.scala:220:14]
input io_dpath_gstatus_sum, // @[PTW.scala:220:14]
input io_dpath_gstatus_mprv, // @[PTW.scala:220:14]
input [1:0] io_dpath_gstatus_fs, // @[PTW.scala:220:14]
input [1:0] io_dpath_gstatus_mpp, // @[PTW.scala:220:14]
input [1:0] io_dpath_gstatus_vs, // @[PTW.scala:220:14]
input io_dpath_gstatus_spp, // @[PTW.scala:220:14]
input io_dpath_gstatus_mpie, // @[PTW.scala:220:14]
input io_dpath_gstatus_ube, // @[PTW.scala:220:14]
input io_dpath_gstatus_spie, // @[PTW.scala:220:14]
input io_dpath_gstatus_upie, // @[PTW.scala:220:14]
input io_dpath_gstatus_mie, // @[PTW.scala:220:14]
input io_dpath_gstatus_hie, // @[PTW.scala:220:14]
input io_dpath_gstatus_sie, // @[PTW.scala:220:14]
input io_dpath_gstatus_uie, // @[PTW.scala:220:14]
input io_dpath_pmp_0_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_0_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_0_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_0_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_0_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_0_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_0_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_1_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_1_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_1_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_1_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_1_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_1_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_1_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_2_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_2_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_2_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_2_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_2_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_2_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_2_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_3_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_3_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_3_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_3_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_3_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_3_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_3_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_4_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_4_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_4_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_4_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_4_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_4_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_4_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_5_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_5_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_5_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_5_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_5_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_5_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_5_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_6_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_6_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_6_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_6_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_6_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_6_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_6_mask, // @[PTW.scala:220:14]
input io_dpath_pmp_7_cfg_l, // @[PTW.scala:220:14]
input [1:0] io_dpath_pmp_7_cfg_a, // @[PTW.scala:220:14]
input io_dpath_pmp_7_cfg_x, // @[PTW.scala:220:14]
input io_dpath_pmp_7_cfg_w, // @[PTW.scala:220:14]
input io_dpath_pmp_7_cfg_r, // @[PTW.scala:220:14]
input [29:0] io_dpath_pmp_7_addr, // @[PTW.scala:220:14]
input [31:0] io_dpath_pmp_7_mask, // @[PTW.scala:220:14]
output io_dpath_perf_pte_miss, // @[PTW.scala:220:14]
output io_dpath_perf_pte_hit, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_0_ren, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_0_wen, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_0_wdata, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_0_value, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_1_ren, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_1_wen, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_1_wdata, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_1_value, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_2_ren, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_2_wen, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_2_wdata, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_2_value, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_3_ren, // @[PTW.scala:220:14]
input io_dpath_customCSRs_csrs_3_wen, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_3_wdata, // @[PTW.scala:220:14]
input [63:0] io_dpath_customCSRs_csrs_3_value, // @[PTW.scala:220:14]
output io_dpath_clock_enabled // @[PTW.scala:220:14]
);
wire tmp_r; // @[PTW.scala:304:37]
wire tmp_w; // @[PTW.scala:304:37]
wire tmp_x; // @[PTW.scala:304:37]
wire tmp_u; // @[PTW.scala:304:37]
wire tmp_g; // @[PTW.scala:304:37]
wire tmp_a; // @[PTW.scala:304:37]
wire tmp_d; // @[PTW.scala:304:37]
wire [1:0] tmp_reserved_for_software; // @[PTW.scala:304:37]
wire [9:0] tmp_reserved_for_future; // @[PTW.scala:304:37]
wire [9:0] _r_pte_barrier_io_y_reserved_for_future; // @[package.scala:267:25]
wire [43:0] _r_pte_barrier_io_y_ppn; // @[package.scala:267:25]
wire [1:0] _r_pte_barrier_io_y_reserved_for_software; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_d; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_a; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_g; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_u; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_x; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_w; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_r; // @[package.scala:267:25]
wire _r_pte_barrier_io_y_v; // @[package.scala:267:25]
wire [2:0] _state_barrier_io_y; // @[package.scala:267:25]
wire _arb_io_out_valid; // @[PTW.scala:236:19]
wire _arb_io_out_bits_valid; // @[PTW.scala:236:19]
wire [26:0] _arb_io_out_bits_bits_addr; // @[PTW.scala:236:19]
wire _arb_io_out_bits_bits_need_gpa; // @[PTW.scala:236:19]
wire _arb_io_chosen; // @[PTW.scala:236:19]
wire io_requestor_0_req_valid_0 = io_requestor_0_req_valid; // @[PTW.scala:219:7]
wire [26:0] io_requestor_0_req_bits_bits_addr_0 = io_requestor_0_req_bits_bits_addr; // @[PTW.scala:219:7]
wire io_requestor_0_req_bits_bits_need_gpa_0 = io_requestor_0_req_bits_bits_need_gpa; // @[PTW.scala:219:7]
wire io_requestor_1_req_valid_0 = io_requestor_1_req_valid; // @[PTW.scala:219:7]
wire io_requestor_1_req_bits_valid_0 = io_requestor_1_req_bits_valid; // @[PTW.scala:219:7]
wire [26:0] io_requestor_1_req_bits_bits_addr_0 = io_requestor_1_req_bits_bits_addr; // @[PTW.scala:219:7]
wire io_requestor_1_req_bits_bits_need_gpa_0 = io_requestor_1_req_bits_bits_need_gpa; // @[PTW.scala:219:7]
wire io_mem_req_ready_0 = io_mem_req_ready; // @[PTW.scala:219:7]
wire io_mem_s2_nack_0 = io_mem_s2_nack; // @[PTW.scala:219:7]
wire io_mem_s2_nack_cause_raw_0 = io_mem_s2_nack_cause_raw; // @[PTW.scala:219:7]
wire io_mem_s2_uncached_0 = io_mem_s2_uncached; // @[PTW.scala:219:7]
wire [31:0] io_mem_s2_paddr_0 = io_mem_s2_paddr; // @[PTW.scala:219:7]
wire io_mem_resp_valid_0 = io_mem_resp_valid; // @[PTW.scala:219:7]
wire [39:0] io_mem_resp_bits_addr_0 = io_mem_resp_bits_addr; // @[PTW.scala:219:7]
wire [6:0] io_mem_resp_bits_tag_0 = io_mem_resp_bits_tag; // @[PTW.scala:219:7]
wire [4:0] io_mem_resp_bits_cmd_0 = io_mem_resp_bits_cmd; // @[PTW.scala:219:7]
wire [1:0] io_mem_resp_bits_size_0 = io_mem_resp_bits_size; // @[PTW.scala:219:7]
wire io_mem_resp_bits_signed_0 = io_mem_resp_bits_signed; // @[PTW.scala:219:7]
wire [1:0] io_mem_resp_bits_dprv_0 = io_mem_resp_bits_dprv; // @[PTW.scala:219:7]
wire io_mem_resp_bits_dv_0 = io_mem_resp_bits_dv; // @[PTW.scala:219:7]
wire [63:0] io_mem_resp_bits_data_0 = io_mem_resp_bits_data; // @[PTW.scala:219:7]
wire [7:0] io_mem_resp_bits_mask_0 = io_mem_resp_bits_mask; // @[PTW.scala:219:7]
wire io_mem_resp_bits_replay_0 = io_mem_resp_bits_replay; // @[PTW.scala:219:7]
wire io_mem_resp_bits_has_data_0 = io_mem_resp_bits_has_data; // @[PTW.scala:219:7]
wire [63:0] io_mem_resp_bits_data_word_bypass_0 = io_mem_resp_bits_data_word_bypass; // @[PTW.scala:219:7]
wire [63:0] io_mem_resp_bits_data_raw_0 = io_mem_resp_bits_data_raw; // @[PTW.scala:219:7]
wire [63:0] io_mem_resp_bits_store_data_0 = io_mem_resp_bits_store_data; // @[PTW.scala:219:7]
wire io_mem_replay_next_0 = io_mem_replay_next; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_ma_ld_0 = io_mem_s2_xcpt_ma_ld; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_ma_st_0 = io_mem_s2_xcpt_ma_st; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_pf_ld_0 = io_mem_s2_xcpt_pf_ld; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_pf_st_0 = io_mem_s2_xcpt_pf_st; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_ae_ld_0 = io_mem_s2_xcpt_ae_ld; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_ae_st_0 = io_mem_s2_xcpt_ae_st; // @[PTW.scala:219:7]
wire [39:0] io_mem_s2_gpa_0 = io_mem_s2_gpa; // @[PTW.scala:219:7]
wire io_mem_ordered_0 = io_mem_ordered; // @[PTW.scala:219:7]
wire io_mem_store_pending_0 = io_mem_store_pending; // @[PTW.scala:219:7]
wire io_mem_perf_acquire_0 = io_mem_perf_acquire; // @[PTW.scala:219:7]
wire io_mem_perf_release_0 = io_mem_perf_release; // @[PTW.scala:219:7]
wire io_mem_perf_grant_0 = io_mem_perf_grant; // @[PTW.scala:219:7]
wire io_mem_perf_tlbMiss_0 = io_mem_perf_tlbMiss; // @[PTW.scala:219:7]
wire io_mem_perf_blocked_0 = io_mem_perf_blocked; // @[PTW.scala:219:7]
wire io_mem_perf_canAcceptStoreThenLoad_0 = io_mem_perf_canAcceptStoreThenLoad; // @[PTW.scala:219:7]
wire io_mem_perf_canAcceptStoreThenRMW_0 = io_mem_perf_canAcceptStoreThenRMW; // @[PTW.scala:219:7]
wire io_mem_perf_canAcceptLoadThenLoad_0 = io_mem_perf_canAcceptLoadThenLoad; // @[PTW.scala:219:7]
wire io_mem_perf_storeBufferEmptyAfterLoad_0 = io_mem_perf_storeBufferEmptyAfterLoad; // @[PTW.scala:219:7]
wire io_mem_perf_storeBufferEmptyAfterStore_0 = io_mem_perf_storeBufferEmptyAfterStore; // @[PTW.scala:219:7]
wire [3:0] io_dpath_ptbr_mode_0 = io_dpath_ptbr_mode; // @[PTW.scala:219:7]
wire [43:0] io_dpath_ptbr_ppn_0 = io_dpath_ptbr_ppn; // @[PTW.scala:219:7]
wire io_dpath_sfence_valid_0 = io_dpath_sfence_valid; // @[PTW.scala:219:7]
wire io_dpath_sfence_bits_rs1_0 = io_dpath_sfence_bits_rs1; // @[PTW.scala:219:7]
wire io_dpath_sfence_bits_rs2_0 = io_dpath_sfence_bits_rs2; // @[PTW.scala:219:7]
wire [38:0] io_dpath_sfence_bits_addr_0 = io_dpath_sfence_bits_addr; // @[PTW.scala:219:7]
wire io_dpath_sfence_bits_asid_0 = io_dpath_sfence_bits_asid; // @[PTW.scala:219:7]
wire io_dpath_sfence_bits_hv_0 = io_dpath_sfence_bits_hv; // @[PTW.scala:219:7]
wire io_dpath_sfence_bits_hg_0 = io_dpath_sfence_bits_hg; // @[PTW.scala:219:7]
wire io_dpath_status_debug_0 = io_dpath_status_debug; // @[PTW.scala:219:7]
wire io_dpath_status_cease_0 = io_dpath_status_cease; // @[PTW.scala:219:7]
wire io_dpath_status_wfi_0 = io_dpath_status_wfi; // @[PTW.scala:219:7]
wire [31:0] io_dpath_status_isa_0 = io_dpath_status_isa; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_dprv_0 = io_dpath_status_dprv; // @[PTW.scala:219:7]
wire io_dpath_status_dv_0 = io_dpath_status_dv; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_prv_0 = io_dpath_status_prv; // @[PTW.scala:219:7]
wire io_dpath_status_v_0 = io_dpath_status_v; // @[PTW.scala:219:7]
wire io_dpath_status_sd_0 = io_dpath_status_sd; // @[PTW.scala:219:7]
wire io_dpath_status_mpv_0 = io_dpath_status_mpv; // @[PTW.scala:219:7]
wire io_dpath_status_gva_0 = io_dpath_status_gva; // @[PTW.scala:219:7]
wire io_dpath_status_tsr_0 = io_dpath_status_tsr; // @[PTW.scala:219:7]
wire io_dpath_status_tw_0 = io_dpath_status_tw; // @[PTW.scala:219:7]
wire io_dpath_status_tvm_0 = io_dpath_status_tvm; // @[PTW.scala:219:7]
wire io_dpath_status_mxr_0 = io_dpath_status_mxr; // @[PTW.scala:219:7]
wire io_dpath_status_sum_0 = io_dpath_status_sum; // @[PTW.scala:219:7]
wire io_dpath_status_mprv_0 = io_dpath_status_mprv; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_fs_0 = io_dpath_status_fs; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_mpp_0 = io_dpath_status_mpp; // @[PTW.scala:219:7]
wire io_dpath_status_spp_0 = io_dpath_status_spp; // @[PTW.scala:219:7]
wire io_dpath_status_mpie_0 = io_dpath_status_mpie; // @[PTW.scala:219:7]
wire io_dpath_status_spie_0 = io_dpath_status_spie; // @[PTW.scala:219:7]
wire io_dpath_status_mie_0 = io_dpath_status_mie; // @[PTW.scala:219:7]
wire io_dpath_status_sie_0 = io_dpath_status_sie; // @[PTW.scala:219:7]
wire io_dpath_hstatus_spvp_0 = io_dpath_hstatus_spvp; // @[PTW.scala:219:7]
wire io_dpath_hstatus_spv_0 = io_dpath_hstatus_spv; // @[PTW.scala:219:7]
wire io_dpath_hstatus_gva_0 = io_dpath_hstatus_gva; // @[PTW.scala:219:7]
wire io_dpath_gstatus_debug_0 = io_dpath_gstatus_debug; // @[PTW.scala:219:7]
wire io_dpath_gstatus_cease_0 = io_dpath_gstatus_cease; // @[PTW.scala:219:7]
wire io_dpath_gstatus_wfi_0 = io_dpath_gstatus_wfi; // @[PTW.scala:219:7]
wire [31:0] io_dpath_gstatus_isa_0 = io_dpath_gstatus_isa; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_dprv_0 = io_dpath_gstatus_dprv; // @[PTW.scala:219:7]
wire io_dpath_gstatus_dv_0 = io_dpath_gstatus_dv; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_prv_0 = io_dpath_gstatus_prv; // @[PTW.scala:219:7]
wire io_dpath_gstatus_v_0 = io_dpath_gstatus_v; // @[PTW.scala:219:7]
wire io_dpath_gstatus_sd_0 = io_dpath_gstatus_sd; // @[PTW.scala:219:7]
wire [22:0] io_dpath_gstatus_zero2_0 = io_dpath_gstatus_zero2; // @[PTW.scala:219:7]
wire io_dpath_gstatus_mpv_0 = io_dpath_gstatus_mpv; // @[PTW.scala:219:7]
wire io_dpath_gstatus_gva_0 = io_dpath_gstatus_gva; // @[PTW.scala:219:7]
wire io_dpath_gstatus_mbe_0 = io_dpath_gstatus_mbe; // @[PTW.scala:219:7]
wire io_dpath_gstatus_sbe_0 = io_dpath_gstatus_sbe; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_sxl_0 = io_dpath_gstatus_sxl; // @[PTW.scala:219:7]
wire [7:0] io_dpath_gstatus_zero1_0 = io_dpath_gstatus_zero1; // @[PTW.scala:219:7]
wire io_dpath_gstatus_tsr_0 = io_dpath_gstatus_tsr; // @[PTW.scala:219:7]
wire io_dpath_gstatus_tw_0 = io_dpath_gstatus_tw; // @[PTW.scala:219:7]
wire io_dpath_gstatus_tvm_0 = io_dpath_gstatus_tvm; // @[PTW.scala:219:7]
wire io_dpath_gstatus_mxr_0 = io_dpath_gstatus_mxr; // @[PTW.scala:219:7]
wire io_dpath_gstatus_sum_0 = io_dpath_gstatus_sum; // @[PTW.scala:219:7]
wire io_dpath_gstatus_mprv_0 = io_dpath_gstatus_mprv; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_fs_0 = io_dpath_gstatus_fs; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_mpp_0 = io_dpath_gstatus_mpp; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_vs_0 = io_dpath_gstatus_vs; // @[PTW.scala:219:7]
wire io_dpath_gstatus_spp_0 = io_dpath_gstatus_spp; // @[PTW.scala:219:7]
wire io_dpath_gstatus_mpie_0 = io_dpath_gstatus_mpie; // @[PTW.scala:219:7]
wire io_dpath_gstatus_ube_0 = io_dpath_gstatus_ube; // @[PTW.scala:219:7]
wire io_dpath_gstatus_spie_0 = io_dpath_gstatus_spie; // @[PTW.scala:219:7]
wire io_dpath_gstatus_upie_0 = io_dpath_gstatus_upie; // @[PTW.scala:219:7]
wire io_dpath_gstatus_mie_0 = io_dpath_gstatus_mie; // @[PTW.scala:219:7]
wire io_dpath_gstatus_hie_0 = io_dpath_gstatus_hie; // @[PTW.scala:219:7]
wire io_dpath_gstatus_sie_0 = io_dpath_gstatus_sie; // @[PTW.scala:219:7]
wire io_dpath_gstatus_uie_0 = io_dpath_gstatus_uie; // @[PTW.scala:219:7]
wire io_dpath_pmp_0_cfg_l_0 = io_dpath_pmp_0_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_0_cfg_a_0 = io_dpath_pmp_0_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_0_cfg_x_0 = io_dpath_pmp_0_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_0_cfg_w_0 = io_dpath_pmp_0_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_0_cfg_r_0 = io_dpath_pmp_0_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_0_addr_0 = io_dpath_pmp_0_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_0_mask_0 = io_dpath_pmp_0_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_1_cfg_l_0 = io_dpath_pmp_1_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_1_cfg_a_0 = io_dpath_pmp_1_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_1_cfg_x_0 = io_dpath_pmp_1_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_1_cfg_w_0 = io_dpath_pmp_1_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_1_cfg_r_0 = io_dpath_pmp_1_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_1_addr_0 = io_dpath_pmp_1_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_1_mask_0 = io_dpath_pmp_1_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_2_cfg_l_0 = io_dpath_pmp_2_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_2_cfg_a_0 = io_dpath_pmp_2_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_2_cfg_x_0 = io_dpath_pmp_2_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_2_cfg_w_0 = io_dpath_pmp_2_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_2_cfg_r_0 = io_dpath_pmp_2_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_2_addr_0 = io_dpath_pmp_2_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_2_mask_0 = io_dpath_pmp_2_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_3_cfg_l_0 = io_dpath_pmp_3_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_3_cfg_a_0 = io_dpath_pmp_3_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_3_cfg_x_0 = io_dpath_pmp_3_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_3_cfg_w_0 = io_dpath_pmp_3_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_3_cfg_r_0 = io_dpath_pmp_3_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_3_addr_0 = io_dpath_pmp_3_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_3_mask_0 = io_dpath_pmp_3_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_4_cfg_l_0 = io_dpath_pmp_4_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_4_cfg_a_0 = io_dpath_pmp_4_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_4_cfg_x_0 = io_dpath_pmp_4_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_4_cfg_w_0 = io_dpath_pmp_4_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_4_cfg_r_0 = io_dpath_pmp_4_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_4_addr_0 = io_dpath_pmp_4_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_4_mask_0 = io_dpath_pmp_4_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_5_cfg_l_0 = io_dpath_pmp_5_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_5_cfg_a_0 = io_dpath_pmp_5_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_5_cfg_x_0 = io_dpath_pmp_5_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_5_cfg_w_0 = io_dpath_pmp_5_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_5_cfg_r_0 = io_dpath_pmp_5_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_5_addr_0 = io_dpath_pmp_5_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_5_mask_0 = io_dpath_pmp_5_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_6_cfg_l_0 = io_dpath_pmp_6_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_6_cfg_a_0 = io_dpath_pmp_6_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_6_cfg_x_0 = io_dpath_pmp_6_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_6_cfg_w_0 = io_dpath_pmp_6_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_6_cfg_r_0 = io_dpath_pmp_6_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_6_addr_0 = io_dpath_pmp_6_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_6_mask_0 = io_dpath_pmp_6_mask; // @[PTW.scala:219:7]
wire io_dpath_pmp_7_cfg_l_0 = io_dpath_pmp_7_cfg_l; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_7_cfg_a_0 = io_dpath_pmp_7_cfg_a; // @[PTW.scala:219:7]
wire io_dpath_pmp_7_cfg_x_0 = io_dpath_pmp_7_cfg_x; // @[PTW.scala:219:7]
wire io_dpath_pmp_7_cfg_w_0 = io_dpath_pmp_7_cfg_w; // @[PTW.scala:219:7]
wire io_dpath_pmp_7_cfg_r_0 = io_dpath_pmp_7_cfg_r; // @[PTW.scala:219:7]
wire [29:0] io_dpath_pmp_7_addr_0 = io_dpath_pmp_7_addr; // @[PTW.scala:219:7]
wire [31:0] io_dpath_pmp_7_mask_0 = io_dpath_pmp_7_mask; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_0_ren_0 = io_dpath_customCSRs_csrs_0_ren; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_0_wen_0 = io_dpath_customCSRs_csrs_0_wen; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_0_wdata_0 = io_dpath_customCSRs_csrs_0_wdata; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_0_value_0 = io_dpath_customCSRs_csrs_0_value; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_1_ren_0 = io_dpath_customCSRs_csrs_1_ren; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_1_wen_0 = io_dpath_customCSRs_csrs_1_wen; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_1_wdata_0 = io_dpath_customCSRs_csrs_1_wdata; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_1_value_0 = io_dpath_customCSRs_csrs_1_value; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_2_ren_0 = io_dpath_customCSRs_csrs_2_ren; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_2_wen_0 = io_dpath_customCSRs_csrs_2_wen; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_2_wdata_0 = io_dpath_customCSRs_csrs_2_wdata; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_2_value_0 = io_dpath_customCSRs_csrs_2_value; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_3_ren_0 = io_dpath_customCSRs_csrs_3_ren; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_3_wen_0 = io_dpath_customCSRs_csrs_3_wen; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_3_wdata_0 = io_dpath_customCSRs_csrs_3_wdata; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_3_value_0 = io_dpath_customCSRs_csrs_3_value; // @[PTW.scala:219:7]
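// The wires below appear to elaborate to compile-time constants in this PTW
// configuration (e.g. two-stage/hypervisor translation fields, unused status bits,
// and custom-CSR side data), so they are emitted as literal tie-offs rather than
// driven logic.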
wire io_requestor_0_req_bits_bits_vstage1 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_req_bits_bits_stage2 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_fragmented_superpage = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_mbe = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_sbe = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_sd_rv32 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_ube = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_upie = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_hie = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_status_uie = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_vtsr = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_vtw = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_vtvm = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_hu = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_vsbe = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_sd_rv32 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_0_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_0_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_1_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_1_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_2_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_2_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_3_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_3_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_req_bits_bits_vstage1 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_req_bits_bits_stage2 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_fragmented_superpage = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_mbe = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_sbe = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_sd_rv32 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_ube = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_upie = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_hie = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_status_uie = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_vtsr = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_vtw = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_vtvm = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_hu = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_vsbe = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_sd_rv32 = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_0_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_0_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_1_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_1_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_2_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_2_set = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_3_stall = 1'h0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_3_set = 1'h0; // @[PTW.scala:219:7]
wire io_mem_req_bits_signed = 1'h0; // @[PTW.scala:219:7]
wire io_mem_req_bits_no_resp = 1'h0; // @[PTW.scala:219:7]
wire io_mem_req_bits_no_alloc = 1'h0; // @[PTW.scala:219:7]
wire io_mem_req_bits_no_xcpt = 1'h0; // @[PTW.scala:219:7]
wire io_mem_s2_kill = 1'h0; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_gf_ld = 1'h0; // @[PTW.scala:219:7]
wire io_mem_s2_xcpt_gf_st = 1'h0; // @[PTW.scala:219:7]
wire io_mem_s2_gpa_is_pte = 1'h0; // @[PTW.scala:219:7]
wire io_mem_keep_clock_enabled = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_mbe = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_sbe = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_sd_rv32 = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_ube = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_upie = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_hie = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_status_uie = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_hstatus_vtsr = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_hstatus_vtw = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_hstatus_vtvm = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_hstatus_hu = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_hstatus_vsbe = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_gstatus_sd_rv32 = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_perf_l2miss = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_perf_l2hit = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_0_stall = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_0_set = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_1_stall = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_1_set = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_2_stall = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_2_set = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_3_stall = 1'h0; // @[PTW.scala:219:7]
wire io_dpath_customCSRs_csrs_3_set = 1'h0; // @[PTW.scala:219:7]
wire _resp_valid_WIRE_0 = 1'h0; // @[PTW.scala:242:35]
wire _resp_valid_WIRE_1 = 1'h0; // @[PTW.scala:242:35]
wire _hits_T_9 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_10 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_11 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_12 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_13 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_14 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_15 = 1'h0; // @[PTW.scala:366:27]
wire _hits_T_16 = 1'h0; // @[PTW.scala:366:27]
wire _hit_T_1 = 1'h0; // @[PTW.scala:367:20]
wire stage2_pte_cache_hit = 1'h0; // @[PTW.scala:367:24]
wire _state_reg_set_left_older_T_9 = 1'h0; // @[Replacement.scala:196:43]
wire _state_reg_set_left_older_T_10 = 1'h0; // @[Replacement.scala:196:43]
wire _state_reg_T_70 = 1'h0; // @[package.scala:163:13]
wire _state_reg_T_71 = 1'h0; // @[Replacement.scala:218:17]
wire _state_reg_T_74 = 1'h0; // @[Replacement.scala:207:62]
wire _state_reg_T_75 = 1'h0; // @[Replacement.scala:218:17]
wire _state_reg_set_left_older_T_11 = 1'h0; // @[Replacement.scala:196:43]
wire _state_reg_T_81 = 1'h0; // @[package.scala:163:13]
wire _state_reg_T_82 = 1'h0; // @[Replacement.scala:218:17]
wire _state_reg_T_85 = 1'h0; // @[Replacement.scala:207:62]
wire _state_reg_T_86 = 1'h0; // @[Replacement.scala:218:17]
wire l2_pte_d = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_a = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_g = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_u = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_x = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_w = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_r = 1'h0; // @[PTW.scala:403:113]
wire l2_pte_v = 1'h0; // @[PTW.scala:403:113]
wire _pmpHomogeneous_WIRE_cfg_l = 1'h0; // @[PMP.scala:137:40]
wire _pmpHomogeneous_WIRE_cfg_x = 1'h0; // @[PMP.scala:137:40]
wire _pmpHomogeneous_WIRE_cfg_w = 1'h0; // @[PMP.scala:137:40]
wire _pmpHomogeneous_WIRE_cfg_r = 1'h0; // @[PMP.scala:137:40]
wire _pmpHomogeneous_beginsAfterLower_T_4 = 1'h0; // @[PMP.scala:106:32]
wire pmpHomogeneous_endsBeforeLower = 1'h0; // @[PMP.scala:110:40]
wire _io_requestor_0_resp_bits_fragmented_superpage_T = 1'h0; // @[PTW.scala:563:81]
wire _io_requestor_1_resp_bits_fragmented_superpage_T = 1'h0; // @[PTW.scala:563:81]
wire _stage2_final_T_1 = 1'h0; // @[PTW.scala:595:53]
wire _resp_gf_T_2 = 1'h0; // @[PTW.scala:603:71]
wire _r_pte_T_1 = 1'h0; // @[PTW.scala:670:16]
wire _r_pte_T_3 = 1'h0; // @[PTW.scala:670:29]
wire _r_pte_T_5 = 1'h0; // @[PTW.scala:672:25]
wire r_pte_idxs_0 = 1'h0; // @[PTW.scala:778:58]
wire r_pte_pte_d = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_a = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_g = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_u = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_x = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_w = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_r = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_v = 1'h0; // @[PTW.scala:780:26]
wire r_pte_pte_1_d = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_a = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_g = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_u = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_x = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_w = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_r = 1'h0; // @[PTW.scala:771:26]
wire r_pte_pte_1_v = 1'h0; // @[PTW.scala:771:26]
wire [15:0] io_requestor_0_ptbr_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_requestor_0_hgatp_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_requestor_0_vsatp_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_requestor_1_ptbr_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_requestor_1_hgatp_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_requestor_1_vsatp_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_dpath_ptbr_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_dpath_hgatp_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] io_dpath_vsatp_asid = 16'h0; // @[PTW.scala:219:7]
wire [15:0] satp_asid = 16'h0; // @[PTW.scala:285:17]
wire [3:0] io_requestor_0_hgatp_mode = 4'h0; // @[PTW.scala:219:7]
wire [3:0] io_requestor_0_vsatp_mode = 4'h0; // @[PTW.scala:219:7]
wire [3:0] io_requestor_1_hgatp_mode = 4'h0; // @[PTW.scala:219:7]
wire [3:0] io_requestor_1_vsatp_mode = 4'h0; // @[PTW.scala:219:7]
wire [3:0] io_dpath_hgatp_mode = 4'h0; // @[PTW.scala:219:7]
wire [3:0] io_dpath_vsatp_mode = 4'h0; // @[PTW.scala:219:7]
wire [3:0] hits_lo_1 = 4'h0; // @[package.scala:45:27]
wire [3:0] hits_hi_1 = 4'h0; // @[package.scala:45:27]
wire [3:0] hi_2 = 4'h0; // @[OneHot.scala:30:18]
wire [3:0] lo_2 = 4'h0; // @[OneHot.scala:31:18]
wire [43:0] io_requestor_0_hgatp_ppn = 44'h0; // @[PTW.scala:219:7]
wire [43:0] io_requestor_0_vsatp_ppn = 44'h0; // @[PTW.scala:219:7]
wire [43:0] io_requestor_1_hgatp_ppn = 44'h0; // @[PTW.scala:219:7]
wire [43:0] io_requestor_1_vsatp_ppn = 44'h0; // @[PTW.scala:219:7]
wire [43:0] io_dpath_hgatp_ppn = 44'h0; // @[PTW.scala:219:7]
wire [43:0] io_dpath_vsatp_ppn = 44'h0; // @[PTW.scala:219:7]
wire [43:0] l2_pte_ppn = 44'h0; // @[PTW.scala:403:113]
wire [43:0] r_pte_pte_4_ppn = 44'h0; // @[PTW.scala:780:26]
wire [43:0] _r_pte_pte_ppn_T_5 = 44'h0; // @[PTW.scala:781:19]
wire [22:0] io_requestor_0_status_zero2 = 23'h0; // @[PTW.scala:219:7]
wire [22:0] io_requestor_1_status_zero2 = 23'h0; // @[PTW.scala:219:7]
wire [22:0] io_dpath_status_zero2 = 23'h0; // @[PTW.scala:219:7]
wire [7:0] io_requestor_0_status_zero1 = 8'h0; // @[PTW.scala:219:7]
wire [7:0] io_requestor_1_status_zero1 = 8'h0; // @[PTW.scala:219:7]
wire [7:0] io_mem_req_bits_mask = 8'h0; // @[PTW.scala:219:7]
wire [7:0] io_mem_s1_data_mask = 8'h0; // @[PTW.scala:219:7]
wire [7:0] io_dpath_status_zero1 = 8'h0; // @[PTW.scala:219:7]
wire [7:0] _hits_T_17 = 8'h0; // @[package.scala:45:27]
wire [7:0] hits_1 = 8'h0; // @[PTW.scala:366:43]
wire [1:0] io_requestor_0_status_xs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_status_vs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_hstatus_zero3 = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_hstatus_zero2 = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_xs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_0_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_1_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_2_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_3_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_4_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_5_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_6_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_7_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_xs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_vs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_hstatus_zero3 = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_hstatus_zero2 = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_xs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_0_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_1_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_2_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_3_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_4_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_5_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_6_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_7_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_xs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_vs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_hstatus_zero3 = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_hstatus_zero2 = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_xs = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_0_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_1_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_2_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_3_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_4_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_5_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_6_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] io_dpath_pmp_7_cfg_res = 2'h0; // @[PTW.scala:219:7]
wire [1:0] _r_hgatp_initial_count_T_1 = 2'h0; // @[PTW.scala:286:42]
wire [1:0] r_hgatp_initial_count = 2'h0; // @[PTW.scala:286:58]
wire [1:0] _count_T_1 = 2'h0; // @[PTW.scala:786:28]
wire [1:0] count_1 = 2'h0; // @[PTW.scala:786:44]
wire [1:0] hits_lo_lo_1 = 2'h0; // @[package.scala:45:27]
wire [1:0] hits_lo_hi_1 = 2'h0; // @[package.scala:45:27]
wire [1:0] hits_hi_lo_1 = 2'h0; // @[package.scala:45:27]
wire [1:0] hits_hi_hi_1 = 2'h0; // @[package.scala:45:27]
wire [1:0] hi_3 = 2'h0; // @[OneHot.scala:30:18]
wire [1:0] lo_3 = 2'h0; // @[OneHot.scala:31:18]
wire [1:0] _state_reg_T_69 = 2'h0; // @[package.scala:163:13]
wire [1:0] _state_reg_T_80 = 2'h0; // @[Replacement.scala:207:62]
wire [1:0] l2_pte_reserved_for_software = 2'h0; // @[PTW.scala:403:113]
wire [1:0] _pmpHomogeneous_WIRE_cfg_res = 2'h0; // @[PMP.scala:137:40]
wire [1:0] _pmpHomogeneous_WIRE_cfg_a = 2'h0; // @[PMP.scala:137:40]
wire [1:0] _satp_initial_count_T_1 = 2'h0; // @[PTW.scala:586:45]
wire [1:0] satp_initial_count = 2'h0; // @[PTW.scala:586:61]
wire [1:0] _vsatp_initial_count_T_1 = 2'h0; // @[PTW.scala:587:46]
wire [1:0] vsatp_initial_count = 2'h0; // @[PTW.scala:587:62]
wire [1:0] _hgatp_initial_count_T_1 = 2'h0; // @[PTW.scala:588:46]
wire [1:0] hgatp_initial_count = 2'h0; // @[PTW.scala:588:62]
wire [1:0] _count_T_3 = 2'h0; // @[PTW.scala:596:27]
wire [1:0] _aux_count_T = 2'h0; // @[PTW.scala:597:27]
wire [1:0] _resp_gf_count_T_1 = 2'h0; // @[PTW.scala:786:28]
wire [1:0] resp_gf_count = 2'h0; // @[PTW.scala:786:44]
wire [1:0] _resp_gf_T = 2'h0; // @[package.scala:24:40]
wire [1:0] _r_pte_count_T_1 = 2'h0; // @[PTW.scala:777:28]
wire [1:0] r_pte_count = 2'h0; // @[PTW.scala:777:44]
wire [1:0] r_pte_lsbs = 2'h0; // @[PTW.scala:779:27]
wire [1:0] r_pte_pte_reserved_for_software = 2'h0; // @[PTW.scala:780:26]
wire [1:0] r_pte_pte_1_reserved_for_software = 2'h0; // @[PTW.scala:771:26]
wire [1:0] _r_pte_count_T_4 = 2'h0; // @[PTW.scala:777:28]
wire [1:0] r_pte_count_1 = 2'h0; // @[PTW.scala:777:44]
wire [1:0] _r_pte_count_T_7 = 2'h0; // @[PTW.scala:777:28]
wire [1:0] r_pte_count_2 = 2'h0; // @[PTW.scala:777:44]
wire [1:0] r_pte_lsbs_2 = 2'h0; // @[PTW.scala:779:27]
wire [29:0] io_requestor_0_hstatus_zero6 = 30'h0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_hstatus_zero6 = 30'h0; // @[PTW.scala:219:7]
wire [29:0] io_dpath_hstatus_zero6 = 30'h0; // @[PTW.scala:219:7]
wire [29:0] _pmpHomogeneous_WIRE_addr = 30'h0; // @[PMP.scala:137:40]
wire [8:0] io_requestor_0_hstatus_zero5 = 9'h0; // @[PTW.scala:219:7]
wire [8:0] io_requestor_1_hstatus_zero5 = 9'h0; // @[PTW.scala:219:7]
wire [8:0] io_dpath_hstatus_zero5 = 9'h0; // @[PTW.scala:219:7]
wire [5:0] io_requestor_0_hstatus_vgein = 6'h0; // @[PTW.scala:219:7]
wire [5:0] io_requestor_1_hstatus_vgein = 6'h0; // @[PTW.scala:219:7]
wire [5:0] io_dpath_hstatus_vgein = 6'h0; // @[PTW.scala:219:7]
wire [4:0] io_requestor_0_hstatus_zero1 = 5'h0; // @[PTW.scala:219:7]
wire [4:0] io_requestor_1_hstatus_zero1 = 5'h0; // @[PTW.scala:219:7]
wire [4:0] io_mem_req_bits_cmd = 5'h0; // @[PTW.scala:219:7]
wire [4:0] io_dpath_hstatus_zero1 = 5'h0; // @[PTW.scala:219:7]
wire io_requestor_0_req_bits_valid = 1'h1; // @[PTW.scala:219:7]
wire io_mem_req_bits_phys = 1'h1; // @[PTW.scala:219:7]
wire io_mem_clock_enabled = 1'h1; // @[PTW.scala:219:7]
wire state_reg_set_left_older_9 = 1'h1; // @[Replacement.scala:196:33]
wire state_reg_set_left_older_10 = 1'h1; // @[Replacement.scala:196:33]
wire _state_reg_T_72 = 1'h1; // @[Replacement.scala:218:7]
wire _state_reg_T_76 = 1'h1; // @[Replacement.scala:218:7]
wire _state_reg_T_77 = 1'h1; // @[Replacement.scala:206:16]
wire state_reg_set_left_older_11 = 1'h1; // @[Replacement.scala:196:33]
wire _state_reg_T_83 = 1'h1; // @[Replacement.scala:218:7]
wire _state_reg_T_87 = 1'h1; // @[Replacement.scala:218:7]
wire _state_reg_T_88 = 1'h1; // @[Replacement.scala:206:16]
wire _io_dpath_perf_pte_hit_T_2 = 1'h1; // @[PTW.scala:394:60]
wire _pmaPgLevelHomogeneous_T_1 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_2 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_3 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_4 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_5 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_6 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_19 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_20 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_35 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_36 = 1'h1; // @[TLBPermissions.scala:87:22]
wire _pmaPgLevelHomogeneous_T_97 = 1'h1; // @[TLBPermissions.scala:87:22]
wire pmpHomogeneous_beginsAfterLower = 1'h1; // @[PMP.scala:106:28]
wire _stage2_final_T = 1'h1; // @[PTW.scala:595:56]
wire _r_pte_T = 1'h1; // @[PTW.scala:670:19]
wire [41:0] _r_pte_pte_ppn_T_4 = 42'h0; // @[PTW.scala:781:30]
wire [16:0] r_pte_idxs_0_2 = 17'h0; // @[PTW.scala:778:58]
wire [2:0] _r_hgatp_initial_count_T = 3'h0; // @[PTW.scala:286:42]
wire [2:0] _r_hgatp_initial_count_T_2 = 3'h0; // @[PTW.scala:286:58]
wire [2:0] _count_T = 3'h0; // @[PTW.scala:786:28]
wire [2:0] _count_T_2 = 3'h0; // @[PTW.scala:786:44]
wire [2:0] state_reg_touch_way_sized_3 = 3'h0; // @[package.scala:163:13]
wire [2:0] _satp_initial_count_T = 3'h0; // @[PTW.scala:586:45]
wire [2:0] _satp_initial_count_T_2 = 3'h0; // @[PTW.scala:586:61]
wire [2:0] _vsatp_initial_count_T = 3'h0; // @[PTW.scala:587:46]
wire [2:0] _vsatp_initial_count_T_2 = 3'h0; // @[PTW.scala:587:62]
wire [2:0] _hgatp_initial_count_T = 3'h0; // @[PTW.scala:588:46]
wire [2:0] _hgatp_initial_count_T_2 = 3'h0; // @[PTW.scala:588:62]
wire [2:0] _resp_gf_count_T = 3'h0; // @[PTW.scala:786:28]
wire [2:0] _resp_gf_count_T_2 = 3'h0; // @[PTW.scala:786:44]
wire [2:0] _r_pte_count_T = 3'h0; // @[PTW.scala:777:28]
wire [2:0] _r_pte_count_T_2 = 3'h0; // @[PTW.scala:777:44]
wire [2:0] _r_pte_count_T_3 = 3'h0; // @[PTW.scala:777:28]
wire [2:0] _r_pte_count_T_5 = 3'h0; // @[PTW.scala:777:44]
wire [2:0] _r_pte_count_T_6 = 3'h0; // @[PTW.scala:777:28]
wire [2:0] _r_pte_count_T_8 = 3'h0; // @[PTW.scala:777:44]
wire [19:0] stage2_pte_cache_data = 20'h0; // @[Mux.scala:30:73]
wire [31:0] _pmpHomogeneous_WIRE_mask = 32'h0; // @[PMP.scala:137:40]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_3 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_1 = 32'h0; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_4 = 32'h0; // @[PMP.scala:60:27]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_5 = 32'h0; // @[PMP.scala:110:58]
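// The 2'h2 constants below presumably encode XLEN=64 for the SXL/UXL/VSXL status
// fields, following the RISC-V privileged-spec XLEN encoding (1 = 32, 2 = 64).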
wire [1:0] io_requestor_0_status_sxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_status_uxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_hstatus_vsxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_uxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_sxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_uxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_hstatus_vsxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_uxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_sxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_dpath_status_uxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_dpath_hstatus_vsxl = 2'h2; // @[PTW.scala:219:7]
wire [1:0] io_dpath_gstatus_uxl = 2'h2; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_0_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_1_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_2_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_3_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_0_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_1_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_2_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_3_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_mem_req_bits_data = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_mem_s1_data_data = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_0_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_1_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_2_sdata = 64'h0; // @[PTW.scala:219:7]
wire [63:0] io_dpath_customCSRs_csrs_3_sdata = 64'h0; // @[PTW.scala:219:7]
wire [6:0] io_mem_req_bits_tag = 7'h0; // @[PTW.scala:219:7]
wire [1:0] io_mem_req_bits_size = 2'h3; // @[PTW.scala:219:7]
wire [1:0] io_mem_req_bits_dprv = 2'h1; // @[PTW.scala:219:7]
wire [9:0] l2_pte_reserved_for_future = 10'h0; // @[PTW.scala:403:113]
wire [9:0] r_pte_pte_reserved_for_future = 10'h0; // @[PTW.scala:780:26]
wire [9:0] r_pte_pte_1_reserved_for_future = 10'h0; // @[PTW.scala:771:26]
wire [2:0] _next_state_T_2 = 3'h4; // @[PTW.scala:636:24]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_1 = 32'hFFFFFFFF; // @[PMP.scala:60:29]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:48]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:29]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:48]
wire [39:0] tag_1 = 40'h8000000000; // @[PTW.scala:363:18]
wire [8:0] pte_addr_mask = 9'h1FF; // @[PTW.scala:324:23]
wire [38:0] _tag_T = 39'h0; // @[package.scala:138:15]
wire [1:0] max_count; // @[PTW.scala:289:25]
wire _io_requestor_0_resp_bits_homogeneous_T; // @[PTW.scala:562:58]
wire _io_requestor_0_resp_bits_gpa_is_pte_T; // @[PTW.scala:567:45]
wire _io_requestor_1_resp_bits_homogeneous_T; // @[PTW.scala:562:58]
wire _io_requestor_1_resp_bits_gpa_is_pte_T; // @[PTW.scala:567:45]
wire _io_mem_req_valid_T_2; // @[PTW.scala:515:39]
wire _io_mem_req_bits_dv_T_1; // @[PTW.scala:523:40]
wire _io_mem_s1_kill_T_2; // @[PTW.scala:531:51]
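// The assignments below appear to fan the shared datapath state (ptbr, status,
// hstatus/gstatus, PMP entries, and custom CSRs) out to both requestor ports of
// the PTW.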
wire [3:0] io_requestor_0_ptbr_mode_0 = io_dpath_ptbr_mode_0; // @[PTW.scala:219:7]
wire [3:0] io_requestor_1_ptbr_mode_0 = io_dpath_ptbr_mode_0; // @[PTW.scala:219:7]
wire [3:0] satp_mode = io_dpath_ptbr_mode_0; // @[PTW.scala:219:7, :285:17]
wire [43:0] io_requestor_0_ptbr_ppn_0 = io_dpath_ptbr_ppn_0; // @[PTW.scala:219:7]
wire [43:0] io_requestor_1_ptbr_ppn_0 = io_dpath_ptbr_ppn_0; // @[PTW.scala:219:7]
wire [43:0] satp_ppn = io_dpath_ptbr_ppn_0; // @[PTW.scala:219:7, :285:17]
wire io_requestor_0_status_debug_0 = io_dpath_status_debug_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_debug_0 = io_dpath_status_debug_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_cease_0 = io_dpath_status_cease_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_cease_0 = io_dpath_status_cease_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_wfi_0 = io_dpath_status_wfi_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_wfi_0 = io_dpath_status_wfi_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_status_isa_0 = io_dpath_status_isa_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_status_isa_0 = io_dpath_status_isa_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_status_dprv_0 = io_dpath_status_dprv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_dprv_0 = io_dpath_status_dprv_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_dv_0 = io_dpath_status_dv_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_dv_0 = io_dpath_status_dv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_status_prv_0 = io_dpath_status_prv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_prv_0 = io_dpath_status_prv_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_v_0 = io_dpath_status_v_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_v_0 = io_dpath_status_v_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_sd_0 = io_dpath_status_sd_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_sd_0 = io_dpath_status_sd_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_mpv_0 = io_dpath_status_mpv_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_mpv_0 = io_dpath_status_mpv_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_gva_0 = io_dpath_status_gva_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_gva_0 = io_dpath_status_gva_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_tsr_0 = io_dpath_status_tsr_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_tsr_0 = io_dpath_status_tsr_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_tw_0 = io_dpath_status_tw_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_tw_0 = io_dpath_status_tw_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_tvm_0 = io_dpath_status_tvm_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_tvm_0 = io_dpath_status_tvm_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_mxr_0 = io_dpath_status_mxr_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_mxr_0 = io_dpath_status_mxr_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_sum_0 = io_dpath_status_sum_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_sum_0 = io_dpath_status_sum_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_mprv_0 = io_dpath_status_mprv_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_mprv_0 = io_dpath_status_mprv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_status_fs_0 = io_dpath_status_fs_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_fs_0 = io_dpath_status_fs_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_status_mpp_0 = io_dpath_status_mpp_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_status_mpp_0 = io_dpath_status_mpp_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_spp_0 = io_dpath_status_spp_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_spp_0 = io_dpath_status_spp_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_mpie_0 = io_dpath_status_mpie_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_mpie_0 = io_dpath_status_mpie_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_spie_0 = io_dpath_status_spie_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_spie_0 = io_dpath_status_spie_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_mie_0 = io_dpath_status_mie_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_mie_0 = io_dpath_status_mie_0; // @[PTW.scala:219:7]
wire io_requestor_0_status_sie_0 = io_dpath_status_sie_0; // @[PTW.scala:219:7]
wire io_requestor_1_status_sie_0 = io_dpath_status_sie_0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_spvp_0 = io_dpath_hstatus_spvp_0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_spvp_0 = io_dpath_hstatus_spvp_0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_spv_0 = io_dpath_hstatus_spv_0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_spv_0 = io_dpath_hstatus_spv_0; // @[PTW.scala:219:7]
wire io_requestor_0_hstatus_gva_0 = io_dpath_hstatus_gva_0; // @[PTW.scala:219:7]
wire io_requestor_1_hstatus_gva_0 = io_dpath_hstatus_gva_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_debug_0 = io_dpath_gstatus_debug_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_debug_0 = io_dpath_gstatus_debug_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_cease_0 = io_dpath_gstatus_cease_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_cease_0 = io_dpath_gstatus_cease_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_wfi_0 = io_dpath_gstatus_wfi_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_wfi_0 = io_dpath_gstatus_wfi_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_gstatus_isa_0 = io_dpath_gstatus_isa_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_gstatus_isa_0 = io_dpath_gstatus_isa_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_dprv_0 = io_dpath_gstatus_dprv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_dprv_0 = io_dpath_gstatus_dprv_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_dv_0 = io_dpath_gstatus_dv_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_dv_0 = io_dpath_gstatus_dv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_prv_0 = io_dpath_gstatus_prv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_prv_0 = io_dpath_gstatus_prv_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_v_0 = io_dpath_gstatus_v_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_v_0 = io_dpath_gstatus_v_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_sd_0 = io_dpath_gstatus_sd_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_sd_0 = io_dpath_gstatus_sd_0; // @[PTW.scala:219:7]
wire [22:0] io_requestor_0_gstatus_zero2_0 = io_dpath_gstatus_zero2_0; // @[PTW.scala:219:7]
wire [22:0] io_requestor_1_gstatus_zero2_0 = io_dpath_gstatus_zero2_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_mpv_0 = io_dpath_gstatus_mpv_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_mpv_0 = io_dpath_gstatus_mpv_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_gva_0 = io_dpath_gstatus_gva_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_gva_0 = io_dpath_gstatus_gva_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_mbe_0 = io_dpath_gstatus_mbe_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_mbe_0 = io_dpath_gstatus_mbe_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_sbe_0 = io_dpath_gstatus_sbe_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_sbe_0 = io_dpath_gstatus_sbe_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_sxl_0 = io_dpath_gstatus_sxl_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_sxl_0 = io_dpath_gstatus_sxl_0; // @[PTW.scala:219:7]
wire [7:0] io_requestor_0_gstatus_zero1_0 = io_dpath_gstatus_zero1_0; // @[PTW.scala:219:7]
wire [7:0] io_requestor_1_gstatus_zero1_0 = io_dpath_gstatus_zero1_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_tsr_0 = io_dpath_gstatus_tsr_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_tsr_0 = io_dpath_gstatus_tsr_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_tw_0 = io_dpath_gstatus_tw_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_tw_0 = io_dpath_gstatus_tw_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_tvm_0 = io_dpath_gstatus_tvm_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_tvm_0 = io_dpath_gstatus_tvm_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_mxr_0 = io_dpath_gstatus_mxr_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_mxr_0 = io_dpath_gstatus_mxr_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_sum_0 = io_dpath_gstatus_sum_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_sum_0 = io_dpath_gstatus_sum_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_mprv_0 = io_dpath_gstatus_mprv_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_mprv_0 = io_dpath_gstatus_mprv_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_fs_0 = io_dpath_gstatus_fs_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_fs_0 = io_dpath_gstatus_fs_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_mpp_0 = io_dpath_gstatus_mpp_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_mpp_0 = io_dpath_gstatus_mpp_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_gstatus_vs_0 = io_dpath_gstatus_vs_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_gstatus_vs_0 = io_dpath_gstatus_vs_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_spp_0 = io_dpath_gstatus_spp_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_spp_0 = io_dpath_gstatus_spp_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_mpie_0 = io_dpath_gstatus_mpie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_mpie_0 = io_dpath_gstatus_mpie_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_ube_0 = io_dpath_gstatus_ube_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_ube_0 = io_dpath_gstatus_ube_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_spie_0 = io_dpath_gstatus_spie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_spie_0 = io_dpath_gstatus_spie_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_upie_0 = io_dpath_gstatus_upie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_upie_0 = io_dpath_gstatus_upie_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_mie_0 = io_dpath_gstatus_mie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_mie_0 = io_dpath_gstatus_mie_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_hie_0 = io_dpath_gstatus_hie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_hie_0 = io_dpath_gstatus_hie_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_sie_0 = io_dpath_gstatus_sie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_sie_0 = io_dpath_gstatus_sie_0; // @[PTW.scala:219:7]
wire io_requestor_0_gstatus_uie_0 = io_dpath_gstatus_uie_0; // @[PTW.scala:219:7]
wire io_requestor_1_gstatus_uie_0 = io_dpath_gstatus_uie_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_0_cfg_l_0 = io_dpath_pmp_0_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_0_cfg_l_0 = io_dpath_pmp_0_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_0_cfg_a_0 = io_dpath_pmp_0_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_0_cfg_a_0 = io_dpath_pmp_0_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_0_cfg_x_0 = io_dpath_pmp_0_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_0_cfg_x_0 = io_dpath_pmp_0_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_0_cfg_w_0 = io_dpath_pmp_0_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_0_cfg_w_0 = io_dpath_pmp_0_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_0_cfg_r_0 = io_dpath_pmp_0_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_0_cfg_r_0 = io_dpath_pmp_0_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_0_addr_0 = io_dpath_pmp_0_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_0_addr_0 = io_dpath_pmp_0_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_0_mask_0 = io_dpath_pmp_0_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_0_mask_0 = io_dpath_pmp_0_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_1_cfg_l_0 = io_dpath_pmp_1_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_1_cfg_l_0 = io_dpath_pmp_1_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_1_cfg_a_0 = io_dpath_pmp_1_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_1_cfg_a_0 = io_dpath_pmp_1_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_1_cfg_x_0 = io_dpath_pmp_1_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_1_cfg_x_0 = io_dpath_pmp_1_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_1_cfg_w_0 = io_dpath_pmp_1_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_1_cfg_w_0 = io_dpath_pmp_1_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_1_cfg_r_0 = io_dpath_pmp_1_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_1_cfg_r_0 = io_dpath_pmp_1_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_1_addr_0 = io_dpath_pmp_1_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_1_addr_0 = io_dpath_pmp_1_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_1_mask_0 = io_dpath_pmp_1_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_1_mask_0 = io_dpath_pmp_1_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_2_cfg_l_0 = io_dpath_pmp_2_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_2_cfg_l_0 = io_dpath_pmp_2_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_2_cfg_a_0 = io_dpath_pmp_2_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_2_cfg_a_0 = io_dpath_pmp_2_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_2_cfg_x_0 = io_dpath_pmp_2_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_2_cfg_x_0 = io_dpath_pmp_2_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_2_cfg_w_0 = io_dpath_pmp_2_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_2_cfg_w_0 = io_dpath_pmp_2_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_2_cfg_r_0 = io_dpath_pmp_2_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_2_cfg_r_0 = io_dpath_pmp_2_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_2_addr_0 = io_dpath_pmp_2_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_2_addr_0 = io_dpath_pmp_2_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_2_mask_0 = io_dpath_pmp_2_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_2_mask_0 = io_dpath_pmp_2_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_3_cfg_l_0 = io_dpath_pmp_3_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_3_cfg_l_0 = io_dpath_pmp_3_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_3_cfg_a_0 = io_dpath_pmp_3_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_3_cfg_a_0 = io_dpath_pmp_3_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_3_cfg_x_0 = io_dpath_pmp_3_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_3_cfg_x_0 = io_dpath_pmp_3_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_3_cfg_w_0 = io_dpath_pmp_3_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_3_cfg_w_0 = io_dpath_pmp_3_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_3_cfg_r_0 = io_dpath_pmp_3_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_3_cfg_r_0 = io_dpath_pmp_3_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_3_addr_0 = io_dpath_pmp_3_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_3_addr_0 = io_dpath_pmp_3_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_3_mask_0 = io_dpath_pmp_3_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_3_mask_0 = io_dpath_pmp_3_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_4_cfg_l_0 = io_dpath_pmp_4_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_4_cfg_l_0 = io_dpath_pmp_4_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_4_cfg_a_0 = io_dpath_pmp_4_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_4_cfg_a_0 = io_dpath_pmp_4_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_4_cfg_x_0 = io_dpath_pmp_4_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_4_cfg_x_0 = io_dpath_pmp_4_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_4_cfg_w_0 = io_dpath_pmp_4_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_4_cfg_w_0 = io_dpath_pmp_4_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_4_cfg_r_0 = io_dpath_pmp_4_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_4_cfg_r_0 = io_dpath_pmp_4_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_4_addr_0 = io_dpath_pmp_4_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_4_addr_0 = io_dpath_pmp_4_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_4_mask_0 = io_dpath_pmp_4_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_4_mask_0 = io_dpath_pmp_4_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_5_cfg_l_0 = io_dpath_pmp_5_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_5_cfg_l_0 = io_dpath_pmp_5_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_5_cfg_a_0 = io_dpath_pmp_5_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_5_cfg_a_0 = io_dpath_pmp_5_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_5_cfg_x_0 = io_dpath_pmp_5_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_5_cfg_x_0 = io_dpath_pmp_5_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_5_cfg_w_0 = io_dpath_pmp_5_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_5_cfg_w_0 = io_dpath_pmp_5_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_5_cfg_r_0 = io_dpath_pmp_5_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_5_cfg_r_0 = io_dpath_pmp_5_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_5_addr_0 = io_dpath_pmp_5_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_5_addr_0 = io_dpath_pmp_5_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_5_mask_0 = io_dpath_pmp_5_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_5_mask_0 = io_dpath_pmp_5_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_6_cfg_l_0 = io_dpath_pmp_6_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_6_cfg_l_0 = io_dpath_pmp_6_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_6_cfg_a_0 = io_dpath_pmp_6_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_6_cfg_a_0 = io_dpath_pmp_6_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_6_cfg_x_0 = io_dpath_pmp_6_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_6_cfg_x_0 = io_dpath_pmp_6_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_6_cfg_w_0 = io_dpath_pmp_6_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_6_cfg_w_0 = io_dpath_pmp_6_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_6_cfg_r_0 = io_dpath_pmp_6_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_6_cfg_r_0 = io_dpath_pmp_6_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_6_addr_0 = io_dpath_pmp_6_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_6_addr_0 = io_dpath_pmp_6_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_6_mask_0 = io_dpath_pmp_6_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_6_mask_0 = io_dpath_pmp_6_mask_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_7_cfg_l_0 = io_dpath_pmp_7_cfg_l_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_7_cfg_l_0 = io_dpath_pmp_7_cfg_l_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_pmp_7_cfg_a_0 = io_dpath_pmp_7_cfg_a_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_pmp_7_cfg_a_0 = io_dpath_pmp_7_cfg_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_7_cfg_x_0 = io_dpath_pmp_7_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_7_cfg_x_0 = io_dpath_pmp_7_cfg_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_7_cfg_w_0 = io_dpath_pmp_7_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_7_cfg_w_0 = io_dpath_pmp_7_cfg_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_pmp_7_cfg_r_0 = io_dpath_pmp_7_cfg_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_pmp_7_cfg_r_0 = io_dpath_pmp_7_cfg_r_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_0_pmp_7_addr_0 = io_dpath_pmp_7_addr_0; // @[PTW.scala:219:7]
wire [29:0] io_requestor_1_pmp_7_addr_0 = io_dpath_pmp_7_addr_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_0_pmp_7_mask_0 = io_dpath_pmp_7_mask_0; // @[PTW.scala:219:7]
wire [31:0] io_requestor_1_pmp_7_mask_0 = io_dpath_pmp_7_mask_0; // @[PTW.scala:219:7]
wire _io_dpath_perf_pte_hit_T_3; // @[PTW.scala:394:57]
wire io_requestor_0_customCSRs_csrs_0_ren_0 = io_dpath_customCSRs_csrs_0_ren_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_0_ren_0 = io_dpath_customCSRs_csrs_0_ren_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_0_wen_0 = io_dpath_customCSRs_csrs_0_wen_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_0_wen_0 = io_dpath_customCSRs_csrs_0_wen_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_0_wdata_0 = io_dpath_customCSRs_csrs_0_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_0_wdata_0 = io_dpath_customCSRs_csrs_0_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_0_value_0 = io_dpath_customCSRs_csrs_0_value_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_0_value_0 = io_dpath_customCSRs_csrs_0_value_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_1_ren_0 = io_dpath_customCSRs_csrs_1_ren_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_1_ren_0 = io_dpath_customCSRs_csrs_1_ren_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_1_wen_0 = io_dpath_customCSRs_csrs_1_wen_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_1_wen_0 = io_dpath_customCSRs_csrs_1_wen_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_1_wdata_0 = io_dpath_customCSRs_csrs_1_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_1_wdata_0 = io_dpath_customCSRs_csrs_1_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_1_value_0 = io_dpath_customCSRs_csrs_1_value_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_1_value_0 = io_dpath_customCSRs_csrs_1_value_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_2_ren_0 = io_dpath_customCSRs_csrs_2_ren_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_2_ren_0 = io_dpath_customCSRs_csrs_2_ren_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_2_wen_0 = io_dpath_customCSRs_csrs_2_wen_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_2_wen_0 = io_dpath_customCSRs_csrs_2_wen_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_2_wdata_0 = io_dpath_customCSRs_csrs_2_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_2_wdata_0 = io_dpath_customCSRs_csrs_2_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_2_value_0 = io_dpath_customCSRs_csrs_2_value_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_2_value_0 = io_dpath_customCSRs_csrs_2_value_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_3_ren_0 = io_dpath_customCSRs_csrs_3_ren_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_3_ren_0 = io_dpath_customCSRs_csrs_3_ren_0; // @[PTW.scala:219:7]
wire io_requestor_0_customCSRs_csrs_3_wen_0 = io_dpath_customCSRs_csrs_3_wen_0; // @[PTW.scala:219:7]
wire io_requestor_1_customCSRs_csrs_3_wen_0 = io_dpath_customCSRs_csrs_3_wen_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_3_wdata_0 = io_dpath_customCSRs_csrs_3_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_3_wdata_0 = io_dpath_customCSRs_csrs_3_wdata_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_0_customCSRs_csrs_3_value_0 = io_dpath_customCSRs_csrs_3_value_0; // @[PTW.scala:219:7]
wire [63:0] io_requestor_1_customCSRs_csrs_3_value_0 = io_dpath_customCSRs_csrs_3_value_0; // @[PTW.scala:219:7]
wire _io_dpath_clock_enabled_T; // @[PTW.scala:245:39]
wire io_requestor_0_req_ready_0; // @[PTW.scala:219:7]
wire [9:0] io_requestor_0_resp_bits_pte_reserved_for_future_0; // @[PTW.scala:219:7]
wire [43:0] io_requestor_0_resp_bits_pte_ppn_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_resp_bits_pte_reserved_for_software_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_d_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_a_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_g_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_u_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_x_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_w_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_r_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pte_v_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_gpa_valid_0; // @[PTW.scala:219:7]
wire [38:0] io_requestor_0_resp_bits_gpa_bits_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_ae_ptw_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_ae_final_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_pf_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_gf_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_hr_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_hw_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_hx_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_0_resp_bits_level_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_homogeneous_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_bits_gpa_is_pte_0; // @[PTW.scala:219:7]
wire io_requestor_0_resp_valid_0; // @[PTW.scala:219:7]
wire io_requestor_1_req_ready_0; // @[PTW.scala:219:7]
wire [9:0] io_requestor_1_resp_bits_pte_reserved_for_future_0; // @[PTW.scala:219:7]
wire [43:0] io_requestor_1_resp_bits_pte_ppn_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_resp_bits_pte_reserved_for_software_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_d_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_a_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_g_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_u_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_x_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_w_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_r_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pte_v_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_gpa_valid_0; // @[PTW.scala:219:7]
wire [38:0] io_requestor_1_resp_bits_gpa_bits_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_ae_ptw_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_ae_final_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_pf_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_gf_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_hr_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_hw_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_hx_0; // @[PTW.scala:219:7]
wire [1:0] io_requestor_1_resp_bits_level_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_homogeneous_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_bits_gpa_is_pte_0; // @[PTW.scala:219:7]
wire io_requestor_1_resp_valid_0; // @[PTW.scala:219:7]
wire [39:0] io_mem_req_bits_addr_0; // @[PTW.scala:219:7]
wire io_mem_req_bits_dv_0; // @[PTW.scala:219:7]
wire io_mem_req_valid_0; // @[PTW.scala:219:7]
wire io_mem_s1_kill_0; // @[PTW.scala:219:7]
wire io_dpath_perf_pte_miss_0; // @[PTW.scala:219:7]
wire io_dpath_perf_pte_hit_0; // @[PTW.scala:219:7]
wire io_dpath_clock_enabled_0; // @[PTW.scala:219:7]
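// The registers and gating logic below appear to implement the PTW state machine
// (state, count), response bookkeeping (the resp_* flags), the arbiter-ready and
// clock-enable gating, and the captured PTE fields (r_pte_*) that drive the
// requestor response ports.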
reg [2:0] state; // @[PTW.scala:233:22]
wire l2_refill_wire; // @[PTW.scala:234:28]
wire _arb_io_out_ready_T = ~(|state); // @[PTW.scala:233:22, :240:30]
wire _arb_io_out_ready_T_1 = ~l2_refill_wire; // @[PTW.scala:234:28, :240:46]
wire _arb_io_out_ready_T_2 = _arb_io_out_ready_T & _arb_io_out_ready_T_1; // @[PTW.scala:240:{30,43,46}]
reg resp_valid_0; // @[PTW.scala:242:27]
assign io_requestor_0_resp_valid_0 = resp_valid_0; // @[PTW.scala:219:7, :242:27]
reg resp_valid_1; // @[PTW.scala:242:27]
assign io_requestor_1_resp_valid_0 = resp_valid_1; // @[PTW.scala:219:7, :242:27]
wire _clock_en_T = |state; // @[PTW.scala:233:22, :240:30, :244:24]
wire _clock_en_T_1 = _clock_en_T | l2_refill_wire; // @[PTW.scala:234:28, :244:{24,36}]
wire _clock_en_T_2 = _clock_en_T_1 | _arb_io_out_valid; // @[PTW.scala:236:19, :244:{36,54}]
wire _clock_en_T_3 = _clock_en_T_2 | io_dpath_sfence_valid_0; // @[PTW.scala:219:7, :244:{54,74}]
wire _clock_en_T_4 = io_dpath_customCSRs_csrs_0_value_0[0]; // @[CustomCSRs.scala:43:61]
wire clock_en = _clock_en_T_3 | _clock_en_T_4; // @[CustomCSRs.scala:43:61]
assign _io_dpath_clock_enabled_T = clock_en; // @[PTW.scala:244:99, :245:39]
assign io_dpath_clock_enabled_0 = _io_dpath_clock_enabled_T; // @[PTW.scala:219:7, :245:39]
reg invalidated; // @[PTW.scala:251:24]
reg [1:0] count; // @[PTW.scala:259:18]
wire [1:0] _r_pte_truncIdx_T = count; // @[package.scala:38:21]
reg resp_ae_ptw; // @[PTW.scala:260:24]
assign io_requestor_0_resp_bits_ae_ptw_0 = resp_ae_ptw; // @[PTW.scala:219:7, :260:24]
assign io_requestor_1_resp_bits_ae_ptw_0 = resp_ae_ptw; // @[PTW.scala:219:7, :260:24]
reg resp_ae_final; // @[PTW.scala:261:26]
assign io_requestor_0_resp_bits_ae_final_0 = resp_ae_final; // @[PTW.scala:219:7, :261:26]
assign io_requestor_1_resp_bits_ae_final_0 = resp_ae_final; // @[PTW.scala:219:7, :261:26]
reg resp_pf; // @[PTW.scala:262:20]
assign io_requestor_0_resp_bits_pf_0 = resp_pf; // @[PTW.scala:219:7, :262:20]
assign io_requestor_1_resp_bits_pf_0 = resp_pf; // @[PTW.scala:219:7, :262:20]
reg resp_gf; // @[PTW.scala:263:20]
assign io_requestor_0_resp_bits_gf_0 = resp_gf; // @[PTW.scala:219:7, :263:20]
assign io_requestor_1_resp_bits_gf_0 = resp_gf; // @[PTW.scala:219:7, :263:20]
reg resp_hr; // @[PTW.scala:264:20]
assign io_requestor_0_resp_bits_hr_0 = resp_hr; // @[PTW.scala:219:7, :264:20]
assign io_requestor_1_resp_bits_hr_0 = resp_hr; // @[PTW.scala:219:7, :264:20]
reg resp_hw; // @[PTW.scala:265:20]
assign io_requestor_0_resp_bits_hw_0 = resp_hw; // @[PTW.scala:219:7, :265:20]
assign io_requestor_1_resp_bits_hw_0 = resp_hw; // @[PTW.scala:219:7, :265:20]
reg resp_hx; // @[PTW.scala:266:20]
assign io_requestor_0_resp_bits_hx_0 = resp_hx; // @[PTW.scala:219:7, :266:20]
assign io_requestor_1_resp_bits_hx_0 = resp_hx; // @[PTW.scala:219:7, :266:20]
reg resp_fragmented_superpage; // @[PTW.scala:267:38]
reg [26:0] r_req_addr; // @[PTW.scala:270:18]
reg r_req_need_gpa; // @[PTW.scala:270:18]
assign io_requestor_0_resp_bits_gpa_valid_0 = r_req_need_gpa; // @[PTW.scala:219:7, :270:18]
assign io_requestor_1_resp_bits_gpa_valid_0 = r_req_need_gpa; // @[PTW.scala:219:7, :270:18]
reg r_req_vstage1; // @[PTW.scala:270:18]
reg r_req_stage2; // @[PTW.scala:270:18]
reg r_req_dest; // @[PTW.scala:272:23]
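  // r_pte holds the PTE for the level currently being walked; its fields fan out to both requestor
  // response ports and to the intermediate r_pte_pte_* bundles built at PTW.scala:771/:780.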
reg [9:0] r_pte_reserved_for_future; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_reserved_for_future_0 = r_pte_reserved_for_future; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_reserved_for_future_0 = r_pte_reserved_for_future; // @[PTW.scala:219:7, :275:18]
wire [9:0] r_pte_pte_2_reserved_for_future = r_pte_reserved_for_future; // @[PTW.scala:275:18, :780:26]
wire [9:0] r_pte_pte_3_reserved_for_future = r_pte_reserved_for_future; // @[PTW.scala:275:18, :771:26]
wire [9:0] r_pte_pte_4_reserved_for_future = r_pte_reserved_for_future; // @[PTW.scala:275:18, :780:26]
wire [9:0] r_pte_pte_5_reserved_for_future = r_pte_reserved_for_future; // @[PTW.scala:275:18, :771:26]
reg [43:0] r_pte_ppn; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_ppn_0 = r_pte_ppn; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_ppn_0 = r_pte_ppn; // @[PTW.scala:219:7, :275:18]
reg [1:0] r_pte_reserved_for_software; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_reserved_for_software_0 = r_pte_reserved_for_software; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_reserved_for_software_0 = r_pte_reserved_for_software; // @[PTW.scala:219:7, :275:18]
wire [1:0] r_pte_pte_2_reserved_for_software = r_pte_reserved_for_software; // @[PTW.scala:275:18, :780:26]
wire [1:0] r_pte_pte_3_reserved_for_software = r_pte_reserved_for_software; // @[PTW.scala:275:18, :771:26]
wire [1:0] r_pte_pte_4_reserved_for_software = r_pte_reserved_for_software; // @[PTW.scala:275:18, :780:26]
wire [1:0] r_pte_pte_5_reserved_for_software = r_pte_reserved_for_software; // @[PTW.scala:275:18, :771:26]
reg r_pte_d; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_d_0 = r_pte_d; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_d_0 = r_pte_d; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_d = r_pte_d; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_d = r_pte_d; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_d = r_pte_d; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_d = r_pte_d; // @[PTW.scala:275:18, :771:26]
reg r_pte_a; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_a_0 = r_pte_a; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_a_0 = r_pte_a; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_a = r_pte_a; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_a = r_pte_a; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_a = r_pte_a; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_a = r_pte_a; // @[PTW.scala:275:18, :771:26]
reg r_pte_g; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_g_0 = r_pte_g; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_g_0 = r_pte_g; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_g = r_pte_g; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_g = r_pte_g; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_g = r_pte_g; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_g = r_pte_g; // @[PTW.scala:275:18, :771:26]
reg r_pte_u; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_u_0 = r_pte_u; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_u_0 = r_pte_u; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_u = r_pte_u; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_u = r_pte_u; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_u = r_pte_u; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_u = r_pte_u; // @[PTW.scala:275:18, :771:26]
reg r_pte_x; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_x_0 = r_pte_x; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_x_0 = r_pte_x; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_x = r_pte_x; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_x = r_pte_x; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_x = r_pte_x; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_x = r_pte_x; // @[PTW.scala:275:18, :771:26]
reg r_pte_w; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_w_0 = r_pte_w; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_w_0 = r_pte_w; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_w = r_pte_w; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_w = r_pte_w; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_w = r_pte_w; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_w = r_pte_w; // @[PTW.scala:275:18, :771:26]
reg r_pte_r; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_r_0 = r_pte_r; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_r_0 = r_pte_r; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_r = r_pte_r; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_r = r_pte_r; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_r = r_pte_r; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_r = r_pte_r; // @[PTW.scala:275:18, :771:26]
reg r_pte_v; // @[PTW.scala:275:18]
assign io_requestor_0_resp_bits_pte_v_0 = r_pte_v; // @[PTW.scala:219:7, :275:18]
assign io_requestor_1_resp_bits_pte_v_0 = r_pte_v; // @[PTW.scala:219:7, :275:18]
wire r_pte_pte_2_v = r_pte_v; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_3_v = r_pte_v; // @[PTW.scala:275:18, :771:26]
wire r_pte_pte_4_v = r_pte_v; // @[PTW.scala:275:18, :780:26]
wire r_pte_pte_5_v = r_pte_v; // @[PTW.scala:275:18, :771:26]
reg [3:0] r_hgatp_mode; // @[PTW.scala:276:20]
reg [15:0] r_hgatp_asid; // @[PTW.scala:276:20]
reg [43:0] r_hgatp_ppn; // @[PTW.scala:276:20]
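  // aux_count/aux_pte track the guest-physical (stage-2) side of a two-stage walk; merged_pte_* is the
  // stage-1 PTE combined with that stage-2 state (PTW.scala:278-283, :771).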
reg [1:0] aux_count; // @[PTW.scala:278:22]
wire [1:0] _io_requestor_0_resp_bits_gpa_bits_truncIdx_T = aux_count; // @[package.scala:38:21]
wire [1:0] _io_requestor_1_resp_bits_gpa_bits_truncIdx_T = aux_count; // @[package.scala:38:21]
reg [9:0] aux_pte_reserved_for_future; // @[PTW.scala:280:20]
wire [9:0] merged_pte_reserved_for_future = aux_pte_reserved_for_future; // @[PTW.scala:280:20, :771:26]
reg [43:0] aux_pte_ppn; // @[PTW.scala:280:20]
reg [1:0] aux_pte_reserved_for_software; // @[PTW.scala:280:20]
wire [1:0] merged_pte_reserved_for_software = aux_pte_reserved_for_software; // @[PTW.scala:280:20, :771:26]
reg aux_pte_d; // @[PTW.scala:280:20]
wire merged_pte_d = aux_pte_d; // @[PTW.scala:280:20, :771:26]
reg aux_pte_a; // @[PTW.scala:280:20]
wire merged_pte_a = aux_pte_a; // @[PTW.scala:280:20, :771:26]
reg aux_pte_g; // @[PTW.scala:280:20]
wire merged_pte_g = aux_pte_g; // @[PTW.scala:280:20, :771:26]
reg aux_pte_u; // @[PTW.scala:280:20]
wire merged_pte_u = aux_pte_u; // @[PTW.scala:280:20, :771:26]
reg aux_pte_x; // @[PTW.scala:280:20]
wire merged_pte_x = aux_pte_x; // @[PTW.scala:280:20, :771:26]
reg aux_pte_w; // @[PTW.scala:280:20]
wire merged_pte_w = aux_pte_w; // @[PTW.scala:280:20, :771:26]
reg aux_pte_r; // @[PTW.scala:280:20]
wire merged_pte_r = aux_pte_r; // @[PTW.scala:280:20, :771:26]
reg aux_pte_v; // @[PTW.scala:280:20]
wire merged_pte_v = aux_pte_v; // @[PTW.scala:280:20, :771:26]
reg [11:0] gpa_pgoff; // @[PTW.scala:281:22]
reg stage2; // @[PTW.scala:282:19]
reg stage2_final; // @[PTW.scala:283:25]
wire [43:0] r_pte_pte_5_ppn = satp_ppn; // @[PTW.scala:285:17, :771:26]
wire do_both_stages = r_req_vstage1 & r_req_stage2; // @[PTW.scala:270:18, :288:38]
wire _max_count_T = count < aux_count; // @[PTW.scala:259:18, :278:22, :289:25]
assign max_count = _max_count_T ? aux_count : count; // @[PTW.scala:259:18, :278:22, :289:25]
assign io_requestor_0_resp_bits_level_0 = max_count; // @[PTW.scala:219:7, :289:25]
assign io_requestor_1_resp_bits_level_0 = max_count; // @[PTW.scala:219:7, :289:25]
wire _vpn_T = r_req_vstage1 & stage2; // @[PTW.scala:270:18, :282:19, :290:31]
wire [43:0] vpn = _vpn_T ? aux_pte_ppn : {17'h0, r_req_addr}; // @[PTW.scala:270:18, :280:20, :290:{16,31}]
wire [43:0] _pte_addr_vpn_idxs_T_2 = vpn; // @[PTW.scala:290:16, :322:12]
reg mem_resp_valid; // @[PTW.scala:292:31]
reg [63:0] mem_resp_data; // @[PTW.scala:293:30]
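  // The 64-bit memory response is unpacked into RISC-V PTE fields (v, r, w, x, u, g, a, d, rsw, ppn,
  // reserved); pte.ppn is then narrowed to the stage-1 or stage-2 PPN width (PTW.scala:304-306).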
wire [63:0] _tmp_WIRE = mem_resp_data; // @[PTW.scala:293:30, :304:37]
wire [9:0] _tmp_T_10; // @[PTW.scala:304:37]
wire [43:0] _tmp_T_9; // @[PTW.scala:304:37]
wire [9:0] pte_reserved_for_future = tmp_reserved_for_future; // @[PTW.scala:304:37, :305:26]
wire [1:0] _tmp_T_8; // @[PTW.scala:304:37]
wire _tmp_T_7; // @[PTW.scala:304:37]
wire [1:0] pte_reserved_for_software = tmp_reserved_for_software; // @[PTW.scala:304:37, :305:26]
wire _tmp_T_6; // @[PTW.scala:304:37]
wire pte_d = tmp_d; // @[PTW.scala:304:37, :305:26]
wire _tmp_T_5; // @[PTW.scala:304:37]
wire pte_a = tmp_a; // @[PTW.scala:304:37, :305:26]
wire _tmp_T_4; // @[PTW.scala:304:37]
wire pte_g = tmp_g; // @[PTW.scala:304:37, :305:26]
wire _tmp_T_3; // @[PTW.scala:304:37]
wire pte_u = tmp_u; // @[PTW.scala:304:37, :305:26]
wire _tmp_T_2; // @[PTW.scala:304:37]
wire pte_x = tmp_x; // @[PTW.scala:304:37, :305:26]
wire _tmp_T_1; // @[PTW.scala:304:37]
wire pte_w = tmp_w; // @[PTW.scala:304:37, :305:26]
wire _tmp_T; // @[PTW.scala:304:37]
wire pte_r = tmp_r; // @[PTW.scala:304:37, :305:26]
wire [43:0] tmp_ppn; // @[PTW.scala:304:37]
wire tmp_v; // @[PTW.scala:304:37]
assign _tmp_T = _tmp_WIRE[0]; // @[PTW.scala:304:37]
assign tmp_v = _tmp_T; // @[PTW.scala:304:37]
assign _tmp_T_1 = _tmp_WIRE[1]; // @[PTW.scala:304:37]
assign tmp_r = _tmp_T_1; // @[PTW.scala:304:37]
assign _tmp_T_2 = _tmp_WIRE[2]; // @[PTW.scala:304:37]
assign tmp_w = _tmp_T_2; // @[PTW.scala:304:37]
assign _tmp_T_3 = _tmp_WIRE[3]; // @[PTW.scala:304:37]
assign tmp_x = _tmp_T_3; // @[PTW.scala:304:37]
assign _tmp_T_4 = _tmp_WIRE[4]; // @[PTW.scala:304:37]
assign tmp_u = _tmp_T_4; // @[PTW.scala:304:37]
assign _tmp_T_5 = _tmp_WIRE[5]; // @[PTW.scala:304:37]
assign tmp_g = _tmp_T_5; // @[PTW.scala:304:37]
assign _tmp_T_6 = _tmp_WIRE[6]; // @[PTW.scala:304:37]
assign tmp_a = _tmp_T_6; // @[PTW.scala:304:37]
assign _tmp_T_7 = _tmp_WIRE[7]; // @[PTW.scala:304:37]
assign tmp_d = _tmp_T_7; // @[PTW.scala:304:37]
assign _tmp_T_8 = _tmp_WIRE[9:8]; // @[PTW.scala:304:37]
assign tmp_reserved_for_software = _tmp_T_8; // @[PTW.scala:304:37]
assign _tmp_T_9 = _tmp_WIRE[53:10]; // @[PTW.scala:304:37]
assign tmp_ppn = _tmp_T_9; // @[PTW.scala:304:37]
assign _tmp_T_10 = _tmp_WIRE[63:54]; // @[PTW.scala:304:37]
assign tmp_reserved_for_future = _tmp_T_10; // @[PTW.scala:304:37]
wire [9:0] aux_pte_pte_reserved_for_future = pte_reserved_for_future; // @[PTW.scala:305:26, :771:26]
wire [1:0] aux_pte_pte_reserved_for_software = pte_reserved_for_software; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_d = pte_d; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_a = pte_a; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_g = pte_g; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_u = pte_u; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_x = pte_x; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_w = pte_w; // @[PTW.scala:305:26, :771:26]
wire aux_pte_pte_r = pte_r; // @[PTW.scala:305:26, :771:26]
wire [43:0] pte_ppn; // @[PTW.scala:305:26]
wire pte_v; // @[PTW.scala:305:26]
wire aux_pte_pte_v = pte_v; // @[PTW.scala:305:26, :771:26]
wire _res_ppn_T = ~stage2; // @[PTW.scala:282:19, :306:38]
wire _res_ppn_T_1 = do_both_stages & _res_ppn_T; // @[PTW.scala:288:38, :306:{35,38}]
wire [26:0] _res_ppn_T_2 = tmp_ppn[26:0]; // @[PTW.scala:304:37, :306:54]
wire [19:0] _res_ppn_T_3 = tmp_ppn[19:0]; // @[PTW.scala:304:37, :306:99]
wire [26:0] _res_ppn_T_4 = _res_ppn_T_1 ? _res_ppn_T_2 : {7'h0, _res_ppn_T_3}; // @[PTW.scala:306:{19,35,54,99}]
assign pte_ppn = {17'h0, _res_ppn_T_4}; // @[PTW.scala:305:26, :306:{13,19}]
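  // pte_v is cleared for misaligned superpages (low PPN bits set above the current level); invalid_paddr
  // and invalid_gpa flag PPNs wider than the supported physical / guest-physical range; traverse is high
  // when the entry is a valid non-leaf pointer and the walk may descend a level (PTW.scala:307-317).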
assign pte_v = ~((tmp_r | tmp_w | tmp_x) & (~(count[1]) & (|(tmp_ppn[8:0])) | count == 2'h0 & (|(tmp_ppn[17:9])))) & tmp_v; // @[PTW.scala:259:18, :304:37, :305:26, :307:{17,26,36}, :310:{21,28,38,97,106,114}]
wire invalid_paddr = do_both_stages & ~stage2 ? (|(tmp_ppn[43:27])) : (|(tmp_ppn[43:20])); // @[PTW.scala:282:19, :288:38, :304:37, :306:38, :313:{9,25,46,58,76,88}]
wire [14:0] idxs_0 = tmp_ppn[43:29]; // @[PTW.scala:304:37, :787:58]
wire invalid_gpa = do_both_stages & ~stage2 & (|idxs_0); // @[PTW.scala:282:19, :288:38, :306:38, :314:{21,32}, :787:58, :788:25]
wire _traverse_T = ~pte_r; // @[PTW.scala:139:36, :305:26]
wire _traverse_T_1 = pte_v & _traverse_T; // @[PTW.scala:139:{33,36}, :305:26]
wire _traverse_T_2 = ~pte_w; // @[PTW.scala:139:42, :305:26]
wire _traverse_T_3 = _traverse_T_1 & _traverse_T_2; // @[PTW.scala:139:{33,39,42}]
wire _traverse_T_4 = ~pte_x; // @[PTW.scala:139:48, :305:26]
wire _traverse_T_5 = _traverse_T_3 & _traverse_T_4; // @[PTW.scala:139:{39,45,48}]
wire _traverse_T_6 = ~pte_d; // @[PTW.scala:139:54, :305:26]
wire _traverse_T_7 = _traverse_T_5 & _traverse_T_6; // @[PTW.scala:139:{45,51,54}]
wire _traverse_T_8 = ~pte_a; // @[PTW.scala:139:60, :305:26]
wire _traverse_T_9 = _traverse_T_7 & _traverse_T_8; // @[PTW.scala:139:{51,57,60}]
wire _traverse_T_10 = ~pte_u; // @[PTW.scala:139:66, :305:26]
wire _traverse_T_11 = _traverse_T_9 & _traverse_T_10; // @[PTW.scala:139:{57,63,66}]
wire _traverse_T_12 = ~(|pte_reserved_for_future); // @[PTW.scala:139:92, :305:26]
wire _traverse_T_13 = _traverse_T_11 & _traverse_T_12; // @[PTW.scala:139:{63,69,92}]
wire _traverse_T_14 = ~invalid_paddr; // @[PTW.scala:313:9, :317:33]
wire _traverse_T_15 = _traverse_T_13 & _traverse_T_14; // @[PTW.scala:139:69, :317:{30,33}]
wire _traverse_T_16 = ~invalid_gpa; // @[PTW.scala:314:32, :317:51]
wire _traverse_T_17 = _traverse_T_15 & _traverse_T_16; // @[PTW.scala:317:{30,48,51}]
wire _traverse_T_18 = ~(count[1]); // @[PTW.scala:259:18, :310:21, :317:73]
wire traverse = _traverse_T_17 & _traverse_T_18; // @[PTW.scala:317:{48,64,73}]
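  // pte_addr selects the 9-bit VPN slice for the current level and forms
  // (r_pte.ppn << 12) | (vpn_idx << 3), the physical address of the next PTE (PTW.scala:322-330).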
wire [25:0] _pte_addr_vpn_idxs_T = vpn[43:18]; // @[PTW.scala:290:16, :322:12]
wire [8:0] pte_addr_vpn_idxs_0 = _pte_addr_vpn_idxs_T[8:0]; // @[PTW.scala:322:{12,48}]
wire [34:0] _pte_addr_vpn_idxs_T_1 = vpn[43:9]; // @[PTW.scala:290:16, :322:12]
wire [8:0] pte_addr_vpn_idxs_1 = _pte_addr_vpn_idxs_T_1[8:0]; // @[PTW.scala:322:{12,48}]
wire [8:0] pte_addr_vpn_idxs_2 = _pte_addr_vpn_idxs_T_2[8:0]; // @[PTW.scala:322:{12,48}]
wire _pte_addr_mask_T = ~(|count); // @[PTW.scala:259:18, :324:40]
wire _pte_addr_mask_T_1 = stage2 & _pte_addr_mask_T; // @[PTW.scala:282:19, :324:{31,40}]
wire _T_46 = count == 2'h1; // @[package.scala:39:86]
wire _pte_addr_vpn_idx_T; // @[package.scala:39:86]
assign _pte_addr_vpn_idx_T = _T_46; // @[package.scala:39:86]
wire _pmaHomogeneous_T; // @[package.scala:39:86]
assign _pmaHomogeneous_T = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_3; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_3 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_23; // @[package.scala:39:86]
assign _pmpHomogeneous_T_23 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_11; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_11 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_60; // @[package.scala:39:86]
assign _pmpHomogeneous_T_60 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_5; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_5 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_19; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_19 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_97; // @[package.scala:39:86]
assign _pmpHomogeneous_T_97 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_10; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_10 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_27; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_27 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_134; // @[package.scala:39:86]
assign _pmpHomogeneous_T_134 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_15; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_15 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_35; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_35 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_171; // @[package.scala:39:86]
assign _pmpHomogeneous_T_171 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_20; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_20 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_43; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_43 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_208; // @[package.scala:39:86]
assign _pmpHomogeneous_T_208 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_25; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_25 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_51; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_51 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_245; // @[package.scala:39:86]
assign _pmpHomogeneous_T_245 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_30; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_30 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_59; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_59 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_T_282; // @[package.scala:39:86]
assign _pmpHomogeneous_T_282 = _T_46; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_35; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_35 = _T_46; // @[package.scala:39:86]
wire _merged_pte_stage1_ppn_T; // @[package.scala:39:86]
assign _merged_pte_stage1_ppn_T = _T_46; // @[package.scala:39:86]
wire _aux_pte_T; // @[package.scala:39:86]
assign _aux_pte_T = _T_46; // @[package.scala:39:86]
wire _leaf_T_5; // @[PTW.scala:751:53]
assign _leaf_T_5 = _T_46; // @[package.scala:39:86]
wire [8:0] _pte_addr_vpn_idx_T_1 = _pte_addr_vpn_idx_T ? pte_addr_vpn_idxs_1 : pte_addr_vpn_idxs_0; // @[package.scala:39:{76,86}]
wire _T_241 = count == 2'h2; // @[package.scala:39:86]
wire _pte_addr_vpn_idx_T_2; // @[package.scala:39:86]
assign _pte_addr_vpn_idx_T_2 = _T_241; // @[package.scala:39:86]
wire _pmaHomogeneous_T_2; // @[package.scala:39:86]
assign _pmaHomogeneous_T_2 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_5; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_5 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_25; // @[package.scala:39:86]
assign _pmpHomogeneous_T_25 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_2; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_2 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_13; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_13 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_62; // @[package.scala:39:86]
assign _pmpHomogeneous_T_62 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_7; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_7 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_21; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_21 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_99; // @[package.scala:39:86]
assign _pmpHomogeneous_T_99 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_12; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_12 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_29; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_29 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_136; // @[package.scala:39:86]
assign _pmpHomogeneous_T_136 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_17; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_17 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_37; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_37 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_173; // @[package.scala:39:86]
assign _pmpHomogeneous_T_173 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_22; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_22 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_45; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_45 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_210; // @[package.scala:39:86]
assign _pmpHomogeneous_T_210 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_27; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_27 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_53; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_53 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_247; // @[package.scala:39:86]
assign _pmpHomogeneous_T_247 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_32; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_32 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_maskHomogeneous_T_61; // @[package.scala:39:86]
assign _pmpHomogeneous_maskHomogeneous_T_61 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_T_284; // @[package.scala:39:86]
assign _pmpHomogeneous_T_284 = _T_241; // @[package.scala:39:86]
wire _pmpHomogeneous_pgMask_T_37; // @[package.scala:39:86]
assign _pmpHomogeneous_pgMask_T_37 = _T_241; // @[package.scala:39:86]
wire _merged_pte_stage1_ppn_T_2; // @[package.scala:39:86]
assign _merged_pte_stage1_ppn_T_2 = _T_241; // @[package.scala:39:86]
wire _l2_refill_T; // @[PTW.scala:713:39]
assign _l2_refill_T = _T_241; // @[package.scala:39:86]
wire _aux_pte_T_2; // @[package.scala:39:86]
assign _aux_pte_T_2 = _T_241; // @[package.scala:39:86]
wire _leaf_T_8; // @[PTW.scala:751:53]
assign _leaf_T_8 = _T_241; // @[package.scala:39:86]
wire [8:0] _pte_addr_vpn_idx_T_3 = _pte_addr_vpn_idx_T_2 ? pte_addr_vpn_idxs_2 : _pte_addr_vpn_idx_T_1; // @[package.scala:39:{76,86}]
wire _pte_addr_vpn_idx_T_4 = &count; // @[package.scala:39:86]
wire [8:0] _pte_addr_vpn_idx_T_5 = _pte_addr_vpn_idx_T_4 ? pte_addr_vpn_idxs_2 : _pte_addr_vpn_idx_T_3; // @[package.scala:39:{76,86}]
wire [8:0] pte_addr_vpn_idx = _pte_addr_vpn_idx_T_5; // @[package.scala:39:76]
wire [52:0] _pte_addr_raw_pte_addr_T = {r_pte_ppn, 9'h0}; // @[PTW.scala:275:18, :326:36]
wire [52:0] _pte_addr_raw_pte_addr_T_1 = {_pte_addr_raw_pte_addr_T[52:9], _pte_addr_raw_pte_addr_T[8:0] | pte_addr_vpn_idx}; // @[PTW.scala:325:36, :326:{36,52}]
wire [55:0] pte_addr_raw_pte_addr = {_pte_addr_raw_pte_addr_T_1, 3'h0}; // @[PTW.scala:326:{52,63}]
wire [31:0] pte_addr = pte_addr_raw_pte_addr[31:0]; // @[PTW.scala:326:63, :330:23]
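  // L1 PTE cache: an 8-entry fully-associative cache of intermediate (non-leaf) PTEs, tagged by pte_addr
  // (and vstage1), with pseudo-LRU replacement kept in the 7-bit state_reg tree (PTW.scala:352-367).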
reg [6:0] state_reg; // @[Replacement.scala:168:70]
reg [7:0] valid; // @[PTW.scala:352:24]
reg [31:0] tags_0; // @[PTW.scala:353:19]
reg [31:0] tags_1; // @[PTW.scala:353:19]
reg [31:0] tags_2; // @[PTW.scala:353:19]
reg [31:0] tags_3; // @[PTW.scala:353:19]
reg [31:0] tags_4; // @[PTW.scala:353:19]
reg [31:0] tags_5; // @[PTW.scala:353:19]
reg [31:0] tags_6; // @[PTW.scala:353:19]
reg [31:0] tags_7; // @[PTW.scala:353:19]
reg [19:0] data_0; // @[PTW.scala:355:19]
reg [19:0] data_1; // @[PTW.scala:355:19]
reg [19:0] data_2; // @[PTW.scala:355:19]
reg [19:0] data_3; // @[PTW.scala:355:19]
reg [19:0] data_4; // @[PTW.scala:355:19]
reg [19:0] data_5; // @[PTW.scala:355:19]
reg [19:0] data_6; // @[PTW.scala:355:19]
reg [19:0] data_7; // @[PTW.scala:355:19]
wire _can_hit_T = ~(count[1]); // @[PTW.scala:259:18, :310:21, :317:73, :358:18]
wire _can_hit_T_1 = ~r_req_stage2; // @[PTW.scala:270:18, :358:65]
wire _can_hit_T_2 = r_req_vstage1 ? stage2 : _can_hit_T_1; // @[PTW.scala:270:18, :282:19, :358:{41,65}]
wire can_hit = _can_hit_T & _can_hit_T_2; // @[PTW.scala:358:{18,35,41}]
wire [32:0] tag = {r_req_vstage1, pte_addr}; // @[PTW.scala:270:18, :330:23, :364:15]
wire _hits_T = {1'h0, tags_0} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_1 = {1'h0, tags_1} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_2 = {1'h0, tags_2} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_3 = {1'h0, tags_3} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_4 = {1'h0, tags_4} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_5 = {1'h0, tags_5} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_6 = {1'h0, tags_6} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire _hits_T_7 = {1'h0, tags_7} == tag; // @[PTW.scala:353:19, :364:15, :366:27]
wire [1:0] hits_lo_lo = {_hits_T_1, _hits_T}; // @[package.scala:45:27]
wire [1:0] hits_lo_hi = {_hits_T_3, _hits_T_2}; // @[package.scala:45:27]
wire [3:0] hits_lo = {hits_lo_hi, hits_lo_lo}; // @[package.scala:45:27]
wire [1:0] hits_hi_lo = {_hits_T_5, _hits_T_4}; // @[package.scala:45:27]
wire [1:0] hits_hi_hi = {_hits_T_7, _hits_T_6}; // @[package.scala:45:27]
wire [3:0] hits_hi = {hits_hi_hi, hits_hi_lo}; // @[package.scala:45:27]
wire [7:0] _hits_T_8 = {hits_hi, hits_lo}; // @[package.scala:45:27]
wire [7:0] hits = _hits_T_8 & valid; // @[package.scala:45:27]
wire _hit_T = |hits; // @[PTW.scala:366:43, :367:20]
wire pte_cache_hit = _hit_T & can_hit; // @[PTW.scala:358:35, :367:{20,24}]
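  // Victim selection: use the first invalid way if any exists, otherwise the pseudo-LRU way encoded by
  // state_reg; the tree state is then updated for the touched way (Replacement.scala).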
wire _r_T = &valid; // @[PTW.scala:352:24, :370:25]
wire r_left_subtree_older = state_reg[6]; // @[Replacement.scala:168:70, :243:38]
wire [2:0] r_left_subtree_state = state_reg[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_left_subtree_state = state_reg[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_left_subtree_state_3 = state_reg[5:3]; // @[package.scala:163:13]
wire [2:0] r_right_subtree_state = state_reg[2:0]; // @[Replacement.scala:168:70, :245:38]
wire [2:0] state_reg_right_subtree_state = state_reg[2:0]; // @[Replacement.scala:168:70, :198:38, :245:38]
wire [2:0] state_reg_right_subtree_state_3 = state_reg[2:0]; // @[Replacement.scala:168:70, :198:38, :245:38]
wire r_left_subtree_older_1 = r_left_subtree_state[2]; // @[package.scala:163:13]
wire r_left_subtree_state_1 = r_left_subtree_state[1]; // @[package.scala:163:13]
wire _r_T_1 = r_left_subtree_state_1; // @[package.scala:163:13]
wire r_right_subtree_state_1 = r_left_subtree_state[0]; // @[package.scala:163:13]
wire _r_T_2 = r_right_subtree_state_1; // @[Replacement.scala:245:38, :262:12]
wire _r_T_3 = r_left_subtree_older_1 ? _r_T_1 : _r_T_2; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _r_T_4 = {r_left_subtree_older_1, _r_T_3}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire r_left_subtree_older_2 = r_right_subtree_state[2]; // @[Replacement.scala:243:38, :245:38]
wire r_left_subtree_state_2 = r_right_subtree_state[1]; // @[package.scala:163:13]
wire _r_T_5 = r_left_subtree_state_2; // @[package.scala:163:13]
wire r_right_subtree_state_2 = r_right_subtree_state[0]; // @[Replacement.scala:245:38]
wire _r_T_6 = r_right_subtree_state_2; // @[Replacement.scala:245:38, :262:12]
wire _r_T_7 = r_left_subtree_older_2 ? _r_T_5 : _r_T_6; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _r_T_8 = {r_left_subtree_older_2, _r_T_7}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [1:0] _r_T_9 = r_left_subtree_older ? _r_T_4 : _r_T_8; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _r_T_10 = {r_left_subtree_older, _r_T_9}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [7:0] _r_T_11 = ~valid; // @[PTW.scala:352:24, :370:57]
wire _r_T_12 = _r_T_11[0]; // @[OneHot.scala:48:45]
wire _r_T_13 = _r_T_11[1]; // @[OneHot.scala:48:45]
wire _r_T_14 = _r_T_11[2]; // @[OneHot.scala:48:45]
wire _r_T_15 = _r_T_11[3]; // @[OneHot.scala:48:45]
wire _r_T_16 = _r_T_11[4]; // @[OneHot.scala:48:45]
wire _r_T_17 = _r_T_11[5]; // @[OneHot.scala:48:45]
wire _r_T_18 = _r_T_11[6]; // @[OneHot.scala:48:45]
wire _r_T_19 = _r_T_11[7]; // @[OneHot.scala:48:45]
wire [2:0] _r_T_20 = {2'h3, ~_r_T_18}; // @[OneHot.scala:48:45]
wire [2:0] _r_T_21 = _r_T_17 ? 3'h5 : _r_T_20; // @[OneHot.scala:48:45]
wire [2:0] _r_T_22 = _r_T_16 ? 3'h4 : _r_T_21; // @[OneHot.scala:48:45]
wire [2:0] _r_T_23 = _r_T_15 ? 3'h3 : _r_T_22; // @[OneHot.scala:48:45]
wire [2:0] _r_T_24 = _r_T_14 ? 3'h2 : _r_T_23; // @[OneHot.scala:48:45]
wire [2:0] _r_T_25 = _r_T_13 ? 3'h1 : _r_T_24; // @[OneHot.scala:48:45]
wire [2:0] _r_T_26 = _r_T_12 ? 3'h0 : _r_T_25; // @[OneHot.scala:48:45]
wire [2:0] r = _r_T ? _r_T_10 : _r_T_26; // @[Mux.scala:50:70]
wire [2:0] state_reg_touch_way_sized = r; // @[package.scala:163:13]
wire [7:0] _valid_T = 8'h1 << r; // @[OneHot.scala:58:35]
wire [7:0] _valid_T_1 = valid | _valid_T; // @[OneHot.scala:58:35]
wire _state_reg_set_left_older_T = state_reg_touch_way_sized[2]; // @[package.scala:163:13]
wire state_reg_set_left_older = ~_state_reg_set_left_older_T; // @[Replacement.scala:196:{33,43}]
wire [1:0] _state_reg_T = state_reg_touch_way_sized[1:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_11 = state_reg_touch_way_sized[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_1 = _state_reg_T[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_1 = ~_state_reg_set_left_older_T_1; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_1 = state_reg_left_subtree_state[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_1 = state_reg_left_subtree_state[0]; // @[package.scala:163:13]
wire _state_reg_T_1 = _state_reg_T[0]; // @[package.scala:163:13]
wire _state_reg_T_5 = _state_reg_T[0]; // @[package.scala:163:13]
wire _state_reg_T_2 = _state_reg_T_1; // @[package.scala:163:13]
wire _state_reg_T_3 = ~_state_reg_T_2; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_4 = state_reg_set_left_older_1 ? state_reg_left_subtree_state_1 : _state_reg_T_3; // @[package.scala:163:13]
wire _state_reg_T_6 = _state_reg_T_5; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_7 = ~_state_reg_T_6; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_8 = state_reg_set_left_older_1 ? _state_reg_T_7 : state_reg_right_subtree_state_1; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi = {state_reg_set_left_older_1, _state_reg_T_4}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_9 = {state_reg_hi, _state_reg_T_8}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_10 = state_reg_set_left_older ? state_reg_left_subtree_state : _state_reg_T_9; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_2 = _state_reg_T_11[1]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_2 = ~_state_reg_set_left_older_T_2; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_2 = state_reg_right_subtree_state[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_2 = state_reg_right_subtree_state[0]; // @[Replacement.scala:198:38]
wire _state_reg_T_12 = _state_reg_T_11[0]; // @[package.scala:163:13]
wire _state_reg_T_16 = _state_reg_T_11[0]; // @[package.scala:163:13]
wire _state_reg_T_13 = _state_reg_T_12; // @[package.scala:163:13]
wire _state_reg_T_14 = ~_state_reg_T_13; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_15 = state_reg_set_left_older_2 ? state_reg_left_subtree_state_2 : _state_reg_T_14; // @[package.scala:163:13]
wire _state_reg_T_17 = _state_reg_T_16; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_18 = ~_state_reg_T_17; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_19 = state_reg_set_left_older_2 ? _state_reg_T_18 : state_reg_right_subtree_state_2; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_1 = {state_reg_set_left_older_2, _state_reg_T_15}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_20 = {state_reg_hi_1, _state_reg_T_19}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_21 = state_reg_set_left_older ? _state_reg_T_20 : state_reg_right_subtree_state; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_2 = {state_reg_set_left_older, _state_reg_T_10}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [6:0] _state_reg_T_22 = {state_reg_hi_2, _state_reg_T_21}; // @[Replacement.scala:202:12, :206:16]
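  // State decodes below appear to follow the PTW state enum (s_ready=0, s_req=1, s_wait1=2, s_dummy1=3);
  // _T_152 (state == 3'h1) gates the PTE-cache hit path and io_mem_req_valid.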
wire _T_152 = state == 3'h1; // @[PTW.scala:233:22, :377:24]
wire _io_dpath_perf_pte_hit_T; // @[PTW.scala:394:46]
assign _io_dpath_perf_pte_hit_T = _T_152; // @[PTW.scala:377:24, :394:46]
wire _io_mem_req_valid_T; // @[PTW.scala:515:29]
assign _io_mem_req_valid_T = _T_152; // @[PTW.scala:377:24, :515:29]
wire _r_pte_T_4; // @[PTW.scala:672:15]
assign _r_pte_T_4 = _T_152; // @[PTW.scala:377:24, :672:15]
wire _r_pte_T_6; // @[PTW.scala:674:15]
assign _r_pte_T_6 = _T_152; // @[PTW.scala:377:24, :674:15]
wire [3:0] hi = hits[7:4]; // @[OneHot.scala:30:18]
wire [3:0] lo = hits[3:0]; // @[OneHot.scala:31:18]
wire [3:0] _T_30 = hi | lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] hi_1 = _T_30[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] lo_1 = _T_30[1:0]; // @[OneHot.scala:31:18, :32:28]
wire [2:0] state_reg_touch_way_sized_1 = {|hi, |hi_1, hi_1[1] | lo_1[1]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}]
wire _state_reg_set_left_older_T_3 = state_reg_touch_way_sized_1[2]; // @[package.scala:163:13]
wire state_reg_set_left_older_3 = ~_state_reg_set_left_older_T_3; // @[Replacement.scala:196:{33,43}]
wire [1:0] _state_reg_T_23 = state_reg_touch_way_sized_1[1:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_34 = state_reg_touch_way_sized_1[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_4 = _state_reg_T_23[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_4 = ~_state_reg_set_left_older_T_4; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_4 = state_reg_left_subtree_state_3[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_4 = state_reg_left_subtree_state_3[0]; // @[package.scala:163:13]
wire _state_reg_T_24 = _state_reg_T_23[0]; // @[package.scala:163:13]
wire _state_reg_T_28 = _state_reg_T_23[0]; // @[package.scala:163:13]
wire _state_reg_T_25 = _state_reg_T_24; // @[package.scala:163:13]
wire _state_reg_T_26 = ~_state_reg_T_25; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_27 = state_reg_set_left_older_4 ? state_reg_left_subtree_state_4 : _state_reg_T_26; // @[package.scala:163:13]
wire _state_reg_T_29 = _state_reg_T_28; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_30 = ~_state_reg_T_29; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_31 = state_reg_set_left_older_4 ? _state_reg_T_30 : state_reg_right_subtree_state_4; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_3 = {state_reg_set_left_older_4, _state_reg_T_27}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_32 = {state_reg_hi_3, _state_reg_T_31}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_33 = state_reg_set_left_older_3 ? state_reg_left_subtree_state_3 : _state_reg_T_32; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_5 = _state_reg_T_34[1]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_5 = ~_state_reg_set_left_older_T_5; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_5 = state_reg_right_subtree_state_3[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_5 = state_reg_right_subtree_state_3[0]; // @[Replacement.scala:198:38]
wire _state_reg_T_35 = _state_reg_T_34[0]; // @[package.scala:163:13]
wire _state_reg_T_39 = _state_reg_T_34[0]; // @[package.scala:163:13]
wire _state_reg_T_36 = _state_reg_T_35; // @[package.scala:163:13]
wire _state_reg_T_37 = ~_state_reg_T_36; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_38 = state_reg_set_left_older_5 ? state_reg_left_subtree_state_5 : _state_reg_T_37; // @[package.scala:163:13]
wire _state_reg_T_40 = _state_reg_T_39; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_41 = ~_state_reg_T_40; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_42 = state_reg_set_left_older_5 ? _state_reg_T_41 : state_reg_right_subtree_state_5; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_4 = {state_reg_set_left_older_5, _state_reg_T_38}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_43 = {state_reg_hi_4, _state_reg_T_42}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_44 = state_reg_set_left_older_3 ? _state_reg_T_43 : state_reg_right_subtree_state_3; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_5 = {state_reg_set_left_older_3, _state_reg_T_33}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [6:0] _state_reg_T_45 = {state_reg_hi_5, _state_reg_T_44}; // @[Replacement.scala:202:12, :206:16]
wire _leaf_T_2 = ~(|count); // @[PTW.scala:259:18, :324:40, :382:47, :751:53]
wire [19:0] pte_cache_data = (hits[0] ? data_0 : 20'h0) | (hits[1] ? data_1 : 20'h0) | (hits[2] ? data_2 : 20'h0) | (hits[3] ? data_3 : 20'h0) | (hits[4] ? data_4 : 20'h0) | (hits[5] ? data_5 : 20'h0) | (hits[6] ? data_6 : 20'h0) | (hits[7] ? data_7 : 20'h0); // @[Mux.scala:30:73, :32:36]
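  // Second PTE cache instance (state_reg_1, valid_1, data_1_*): caches stage-2 PTEs during the vstage1
  // part of a two-stage walk; can_hit_1 and can_refill gate its use (PTW.scala:357-360).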
reg [6:0] state_reg_1; // @[Replacement.scala:168:70]
reg [7:0] valid_1; // @[PTW.scala:352:24]
reg [19:0] data_1_0; // @[PTW.scala:355:19]
reg [19:0] data_1_1; // @[PTW.scala:355:19]
reg [19:0] data_1_2; // @[PTW.scala:355:19]
reg [19:0] data_1_3; // @[PTW.scala:355:19]
reg [19:0] data_1_4; // @[PTW.scala:355:19]
reg [19:0] data_1_5; // @[PTW.scala:355:19]
reg [19:0] data_1_6; // @[PTW.scala:355:19]
reg [19:0] data_1_7; // @[PTW.scala:355:19]
wire _can_hit_T_3 = ~(|count); // @[PTW.scala:259:18, :324:40, :357:21]
wire _can_hit_T_4 = ~(aux_count[1]); // @[PTW.scala:278:22, :357:60]
wire _can_hit_T_5 = _can_hit_T_3 & _can_hit_T_4; // @[PTW.scala:357:{21,47,60}]
wire _can_hit_T_6 = _can_hit_T_5 & r_req_vstage1; // @[PTW.scala:270:18, :357:{47,77}]
wire _can_hit_T_7 = _can_hit_T_6 & stage2; // @[PTW.scala:282:19, :357:{77,94}]
wire _can_hit_T_8 = ~stage2_final; // @[PTW.scala:283:25, :357:107]
wire can_hit_1 = _can_hit_T_7 & _can_hit_T_8; // @[PTW.scala:357:{94,104,107}]
wire _can_refill_T = ~stage2; // @[PTW.scala:282:19, :306:38, :360:33]
wire _can_refill_T_1 = do_both_stages & _can_refill_T; // @[PTW.scala:288:38, :360:{30,33}]
wire _can_refill_T_2 = ~stage2_final; // @[PTW.scala:283:25, :357:107, :360:44]
wire can_refill = _can_refill_T_1 & _can_refill_T_2; // @[PTW.scala:360:{30,41,44}]
wire _r_T_27 = &valid_1; // @[PTW.scala:352:24, :370:25]
wire r_left_subtree_older_3 = state_reg_1[6]; // @[Replacement.scala:168:70, :243:38]
wire [2:0] r_left_subtree_state_3 = state_reg_1[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_left_subtree_state_6 = state_reg_1[5:3]; // @[package.scala:163:13]
wire [2:0] state_reg_left_subtree_state_9 = state_reg_1[5:3]; // @[package.scala:163:13]
wire [2:0] r_right_subtree_state_3 = state_reg_1[2:0]; // @[Replacement.scala:168:70, :245:38]
wire [2:0] state_reg_right_subtree_state_6 = state_reg_1[2:0]; // @[Replacement.scala:168:70, :198:38, :245:38]
wire [2:0] state_reg_right_subtree_state_9 = state_reg_1[2:0]; // @[Replacement.scala:168:70, :198:38, :245:38]
wire r_left_subtree_older_4 = r_left_subtree_state_3[2]; // @[package.scala:163:13]
wire r_left_subtree_state_4 = r_left_subtree_state_3[1]; // @[package.scala:163:13]
wire _r_T_28 = r_left_subtree_state_4; // @[package.scala:163:13]
wire r_right_subtree_state_4 = r_left_subtree_state_3[0]; // @[package.scala:163:13]
wire _r_T_29 = r_right_subtree_state_4; // @[Replacement.scala:245:38, :262:12]
wire _r_T_30 = r_left_subtree_older_4 ? _r_T_28 : _r_T_29; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _r_T_31 = {r_left_subtree_older_4, _r_T_30}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire r_left_subtree_older_5 = r_right_subtree_state_3[2]; // @[Replacement.scala:243:38, :245:38]
wire r_left_subtree_state_5 = r_right_subtree_state_3[1]; // @[package.scala:163:13]
wire _r_T_32 = r_left_subtree_state_5; // @[package.scala:163:13]
wire r_right_subtree_state_5 = r_right_subtree_state_3[0]; // @[Replacement.scala:245:38]
wire _r_T_33 = r_right_subtree_state_5; // @[Replacement.scala:245:38, :262:12]
wire _r_T_34 = r_left_subtree_older_5 ? _r_T_32 : _r_T_33; // @[Replacement.scala:243:38, :250:16, :262:12]
wire [1:0] _r_T_35 = {r_left_subtree_older_5, _r_T_34}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [1:0] _r_T_36 = r_left_subtree_older_3 ? _r_T_31 : _r_T_35; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [2:0] _r_T_37 = {r_left_subtree_older_3, _r_T_36}; // @[Replacement.scala:243:38, :249:12, :250:16]
wire [7:0] _r_T_38 = ~valid_1; // @[PTW.scala:352:24, :370:57]
wire _r_T_39 = _r_T_38[0]; // @[OneHot.scala:48:45]
wire _r_T_40 = _r_T_38[1]; // @[OneHot.scala:48:45]
wire _r_T_41 = _r_T_38[2]; // @[OneHot.scala:48:45]
wire _r_T_42 = _r_T_38[3]; // @[OneHot.scala:48:45]
wire _r_T_43 = _r_T_38[4]; // @[OneHot.scala:48:45]
wire _r_T_44 = _r_T_38[5]; // @[OneHot.scala:48:45]
wire _r_T_45 = _r_T_38[6]; // @[OneHot.scala:48:45]
wire _r_T_46 = _r_T_38[7]; // @[OneHot.scala:48:45]
wire [2:0] _r_T_47 = {2'h3, ~_r_T_45}; // @[OneHot.scala:48:45]
wire [2:0] _r_T_48 = _r_T_44 ? 3'h5 : _r_T_47; // @[OneHot.scala:48:45]
wire [2:0] _r_T_49 = _r_T_43 ? 3'h4 : _r_T_48; // @[OneHot.scala:48:45]
wire [2:0] _r_T_50 = _r_T_42 ? 3'h3 : _r_T_49; // @[OneHot.scala:48:45]
wire [2:0] _r_T_51 = _r_T_41 ? 3'h2 : _r_T_50; // @[OneHot.scala:48:45]
wire [2:0] _r_T_52 = _r_T_40 ? 3'h1 : _r_T_51; // @[OneHot.scala:48:45]
wire [2:0] _r_T_53 = _r_T_39 ? 3'h0 : _r_T_52; // @[OneHot.scala:48:45]
wire [2:0] r_1 = _r_T_27 ? _r_T_37 : _r_T_53; // @[Mux.scala:50:70]
wire [2:0] state_reg_touch_way_sized_2 = r_1; // @[package.scala:163:13]
wire [7:0] _valid_T_2 = 8'h1 << r_1; // @[OneHot.scala:58:35]
wire [7:0] _valid_T_3 = valid_1 | _valid_T_2; // @[OneHot.scala:58:35]
wire _state_reg_set_left_older_T_6 = state_reg_touch_way_sized_2[2]; // @[package.scala:163:13]
wire state_reg_set_left_older_6 = ~_state_reg_set_left_older_T_6; // @[Replacement.scala:196:{33,43}]
wire [1:0] _state_reg_T_46 = state_reg_touch_way_sized_2[1:0]; // @[package.scala:163:13]
wire [1:0] _state_reg_T_57 = state_reg_touch_way_sized_2[1:0]; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_7 = _state_reg_T_46[1]; // @[package.scala:163:13]
wire state_reg_set_left_older_7 = ~_state_reg_set_left_older_T_7; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_7 = state_reg_left_subtree_state_6[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_7 = state_reg_left_subtree_state_6[0]; // @[package.scala:163:13]
wire _state_reg_T_47 = _state_reg_T_46[0]; // @[package.scala:163:13]
wire _state_reg_T_51 = _state_reg_T_46[0]; // @[package.scala:163:13]
wire _state_reg_T_48 = _state_reg_T_47; // @[package.scala:163:13]
wire _state_reg_T_49 = ~_state_reg_T_48; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_50 = state_reg_set_left_older_7 ? state_reg_left_subtree_state_7 : _state_reg_T_49; // @[package.scala:163:13]
wire _state_reg_T_52 = _state_reg_T_51; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_53 = ~_state_reg_T_52; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_54 = state_reg_set_left_older_7 ? _state_reg_T_53 : state_reg_right_subtree_state_7; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_6 = {state_reg_set_left_older_7, _state_reg_T_50}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_55 = {state_reg_hi_6, _state_reg_T_54}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_56 = state_reg_set_left_older_6 ? state_reg_left_subtree_state_6 : _state_reg_T_55; // @[package.scala:163:13]
wire _state_reg_set_left_older_T_8 = _state_reg_T_57[1]; // @[Replacement.scala:196:43, :207:62]
wire state_reg_set_left_older_8 = ~_state_reg_set_left_older_T_8; // @[Replacement.scala:196:{33,43}]
wire state_reg_left_subtree_state_8 = state_reg_right_subtree_state_6[1]; // @[package.scala:163:13]
wire state_reg_right_subtree_state_8 = state_reg_right_subtree_state_6[0]; // @[Replacement.scala:198:38]
wire _state_reg_T_58 = _state_reg_T_57[0]; // @[package.scala:163:13]
wire _state_reg_T_62 = _state_reg_T_57[0]; // @[package.scala:163:13]
wire _state_reg_T_59 = _state_reg_T_58; // @[package.scala:163:13]
wire _state_reg_T_60 = ~_state_reg_T_59; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_61 = state_reg_set_left_older_8 ? state_reg_left_subtree_state_8 : _state_reg_T_60; // @[package.scala:163:13]
wire _state_reg_T_63 = _state_reg_T_62; // @[Replacement.scala:207:62, :218:17]
wire _state_reg_T_64 = ~_state_reg_T_63; // @[Replacement.scala:218:{7,17}]
wire _state_reg_T_65 = state_reg_set_left_older_8 ? _state_reg_T_64 : state_reg_right_subtree_state_8; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7]
wire [1:0] state_reg_hi_7 = {state_reg_set_left_older_8, _state_reg_T_61}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [2:0] _state_reg_T_66 = {state_reg_hi_7, _state_reg_T_65}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_67 = state_reg_set_left_older_6 ? _state_reg_T_66 : state_reg_right_subtree_state_6; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16]
wire [3:0] state_reg_hi_8 = {state_reg_set_left_older_6, _state_reg_T_56}; // @[Replacement.scala:196:33, :202:12, :203:16]
wire [6:0] _state_reg_T_68 = {state_reg_hi_8, _state_reg_T_67}; // @[Replacement.scala:202:12, :206:16]
wire [2:0] _state_reg_T_79 = state_reg_left_subtree_state_9; // @[package.scala:163:13]
wire state_reg_left_subtree_state_10 = state_reg_left_subtree_state_9[1]; // @[package.scala:163:13]
wire _state_reg_T_73 = state_reg_left_subtree_state_10; // @[package.scala:163:13]
wire state_reg_right_subtree_state_10 = state_reg_left_subtree_state_9[0]; // @[package.scala:163:13]
wire [1:0] state_reg_hi_9 = {1'h1, _state_reg_T_73}; // @[Replacement.scala:202:12, :203:16]
wire [2:0] _state_reg_T_78 = {state_reg_hi_9, 1'h1}; // @[Replacement.scala:202:12]
wire state_reg_left_subtree_state_11 = state_reg_right_subtree_state_9[1]; // @[package.scala:163:13]
wire _state_reg_T_84 = state_reg_left_subtree_state_11; // @[package.scala:163:13]
wire state_reg_right_subtree_state_11 = state_reg_right_subtree_state_9[0]; // @[Replacement.scala:198:38]
wire [1:0] state_reg_hi_10 = {1'h1, _state_reg_T_84}; // @[Replacement.scala:202:12, :203:16]
wire [2:0] _state_reg_T_89 = {state_reg_hi_10, 1'h1}; // @[Replacement.scala:202:12]
wire [2:0] _state_reg_T_90 = _state_reg_T_89; // @[Replacement.scala:202:12, :206:16]
wire [3:0] state_reg_hi_11 = {1'h1, _state_reg_T_79}; // @[Replacement.scala:202:12, :203:16]
wire [6:0] _state_reg_T_91 = {state_reg_hi_11, _state_reg_T_90}; // @[Replacement.scala:202:12, :206:16]
reg pte_hit; // @[PTW.scala:392:24]
wire _io_dpath_perf_pte_hit_T_1 = pte_hit & _io_dpath_perf_pte_hit_T; // @[PTW.scala:392:24, :394:{36,46}]
assign _io_dpath_perf_pte_hit_T_3 = _io_dpath_perf_pte_hit_T_1; // @[PTW.scala:394:{36,57}]
assign io_dpath_perf_pte_hit_0 = _io_dpath_perf_pte_hit_T_3; // @[PTW.scala:219:7, :394:57]
reg l2_refill; // @[PTW.scala:398:26]
assign l2_refill_wire = l2_refill; // @[PTW.scala:234:28, :398:26]
wire _invalidated_T = |state; // @[PTW.scala:233:22, :240:30, :511:65]
wire _invalidated_T_1 = invalidated & _invalidated_T; // @[PTW.scala:251:24, :511:{56,65}]
wire _invalidated_T_2 = io_dpath_sfence_valid_0 | _invalidated_T_1; // @[PTW.scala:219:7, :511:{40,56}]
wire _io_mem_req_valid_T_1 = state == 3'h3; // @[PTW.scala:233:22, :515:48]
assign _io_mem_req_valid_T_2 = _io_mem_req_valid_T | _io_mem_req_valid_T_1; // @[PTW.scala:515:{29,39,48}]
assign io_mem_req_valid_0 = _io_mem_req_valid_T_2; // @[PTW.scala:219:7, :515:39]
assign io_mem_req_bits_addr_0 = {8'h0, pte_addr}; // @[PTW.scala:219:7, :330:23, :520:24]
wire _io_mem_req_bits_dv_T = ~stage2; // @[PTW.scala:282:19, :306:38, :523:43]
assign _io_mem_req_bits_dv_T_1 = do_both_stages & _io_mem_req_bits_dv_T; // @[PTW.scala:288:38, :523:{40,43}]
assign io_mem_req_bits_dv_0 = _io_mem_req_bits_dv_T_1; // @[PTW.scala:219:7, :523:40]
wire _io_mem_s1_kill_T = state != 3'h2; // @[PTW.scala:233:22, :531:38]
wire _io_mem_s1_kill_T_1 = _io_mem_s1_kill_T; // @[PTW.scala:531:{28,38}]
assign _io_mem_s1_kill_T_2 = _io_mem_s1_kill_T_1 | resp_gf; // @[PTW.scala:263:20, :531:{28,51}]
assign io_mem_s1_kill_0 = _io_mem_s1_kill_T_2; // @[PTW.scala:219:7, :531:51]
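  // Homogeneity checks: the comparisons below test whether the whole (super)page at r_pte.ppn << 12 maps
  // to a single region of the PMA address map (and, further on, of the PMP registers), feeding
  // resp.homogeneous (PTW.scala:544-548, TLBPermissions.scala).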
wire [55:0] _GEN = {r_pte_ppn, 12'h0}; // @[PTW.scala:275:18, :544:96]
wire [55:0] _pmaPgLevelHomogeneous_T; // @[PTW.scala:544:96]
assign _pmaPgLevelHomogeneous_T = _GEN; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_7; // @[PTW.scala:544:96]
assign _pmaPgLevelHomogeneous_T_7 = _GEN; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_37; // @[PTW.scala:544:96]
assign _pmaPgLevelHomogeneous_T_37 = _GEN; // @[PTW.scala:544:96]
wire [55:0] _pmpHomogeneous_T; // @[PTW.scala:548:80]
assign _pmpHomogeneous_T = _GEN; // @[PTW.scala:544:96, :548:80]
wire [55:0] _pmaPgLevelHomogeneous_T_21 = _pmaPgLevelHomogeneous_T_7; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_28 = _pmaPgLevelHomogeneous_T_7; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_8 = {_pmaPgLevelHomogeneous_T_7[55:28], _pmaPgLevelHomogeneous_T_7[27:0] ^ 28'hC000000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_9 = {1'h0, _pmaPgLevelHomogeneous_T_8}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_10 = _pmaPgLevelHomogeneous_T_9 & 57'h1FFFFFFFC000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_11 = _pmaPgLevelHomogeneous_T_10; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_12 = _pmaPgLevelHomogeneous_T_11 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_18 = _pmaPgLevelHomogeneous_T_12; // @[TLBPermissions.scala:101:65]
wire [55:0] _pmaPgLevelHomogeneous_T_13 = {_pmaPgLevelHomogeneous_T_7[55:32], _pmaPgLevelHomogeneous_T_7[31:0] ^ 32'h80000000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_14 = {1'h0, _pmaPgLevelHomogeneous_T_13}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_15 = _pmaPgLevelHomogeneous_T_14 & 57'h1FFFFFFF0000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_16 = _pmaPgLevelHomogeneous_T_15; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_17 = _pmaPgLevelHomogeneous_T_16 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire pmaPgLevelHomogeneous_1 = _pmaPgLevelHomogeneous_T_18 | _pmaPgLevelHomogeneous_T_17; // @[TLBPermissions.scala:101:65]
wire [56:0] _pmaPgLevelHomogeneous_T_22 = {1'h0, _pmaPgLevelHomogeneous_T_21}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_23 = _pmaPgLevelHomogeneous_T_22 & 57'h80000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_24 = _pmaPgLevelHomogeneous_T_23; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_25 = _pmaPgLevelHomogeneous_T_24 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_26 = _pmaPgLevelHomogeneous_T_25; // @[TLBPermissions.scala:87:66]
wire _pmaPgLevelHomogeneous_T_27 = ~_pmaPgLevelHomogeneous_T_26; // @[TLBPermissions.scala:87:{22,66}]
wire [56:0] _pmaPgLevelHomogeneous_T_29 = {1'h0, _pmaPgLevelHomogeneous_T_28}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_30 = _pmaPgLevelHomogeneous_T_29 & 57'h80000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_31 = _pmaPgLevelHomogeneous_T_30; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_32 = _pmaPgLevelHomogeneous_T_31 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_33 = _pmaPgLevelHomogeneous_T_32; // @[TLBPermissions.scala:87:66]
wire _pmaPgLevelHomogeneous_T_34 = ~_pmaPgLevelHomogeneous_T_33; // @[TLBPermissions.scala:87:{22,66}]
wire [55:0] _pmaPgLevelHomogeneous_T_38 = _pmaPgLevelHomogeneous_T_37; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_105 = _pmaPgLevelHomogeneous_T_37; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_39 = {1'h0, _pmaPgLevelHomogeneous_T_38}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_40 = _pmaPgLevelHomogeneous_T_39 & 57'h1FFFFFFFFFFE000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_41 = _pmaPgLevelHomogeneous_T_40; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_42 = _pmaPgLevelHomogeneous_T_41 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_88 = _pmaPgLevelHomogeneous_T_42; // @[TLBPermissions.scala:101:65]
wire [55:0] _GEN_0 = {_pmaPgLevelHomogeneous_T_37[55:14], _pmaPgLevelHomogeneous_T_37[13:0] ^ 14'h3000}; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_43; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_43 = _GEN_0; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_110; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_110 = _GEN_0; // @[Parameters.scala:137:31]
wire [56:0] _pmaPgLevelHomogeneous_T_44 = {1'h0, _pmaPgLevelHomogeneous_T_43}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_45 = _pmaPgLevelHomogeneous_T_44 & 57'h1FFFFFFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_46 = _pmaPgLevelHomogeneous_T_45; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_47 = _pmaPgLevelHomogeneous_T_46 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _GEN_1 = {_pmaPgLevelHomogeneous_T_37[55:17], _pmaPgLevelHomogeneous_T_37[16:0] ^ 17'h10000}; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_48; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_48 = _GEN_1; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_98; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_98 = _GEN_1; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_115; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_115 = _GEN_1; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_147; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_147 = _GEN_1; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_154; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_154 = _GEN_1; // @[Parameters.scala:137:31]
wire [56:0] _pmaPgLevelHomogeneous_T_49 = {1'h0, _pmaPgLevelHomogeneous_T_48}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_50 = _pmaPgLevelHomogeneous_T_49 & 57'h1FFFFFFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_51 = _pmaPgLevelHomogeneous_T_50; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_52 = _pmaPgLevelHomogeneous_T_51 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _pmaPgLevelHomogeneous_T_53 = {_pmaPgLevelHomogeneous_T_37[55:21], _pmaPgLevelHomogeneous_T_37[20:0] ^ 21'h100000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_54 = {1'h0, _pmaPgLevelHomogeneous_T_53}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_55 = _pmaPgLevelHomogeneous_T_54 & 57'h1FFFFFFFFFEF000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_56 = _pmaPgLevelHomogeneous_T_55; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_57 = _pmaPgLevelHomogeneous_T_56 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _pmaPgLevelHomogeneous_T_58 = {_pmaPgLevelHomogeneous_T_37[55:26], _pmaPgLevelHomogeneous_T_37[25:0] ^ 26'h2000000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_59 = {1'h0, _pmaPgLevelHomogeneous_T_58}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_60 = _pmaPgLevelHomogeneous_T_59 & 57'h1FFFFFFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_61 = _pmaPgLevelHomogeneous_T_60; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_62 = _pmaPgLevelHomogeneous_T_61 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _pmaPgLevelHomogeneous_T_63 = {_pmaPgLevelHomogeneous_T_37[55:26], _pmaPgLevelHomogeneous_T_37[25:0] ^ 26'h2010000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_64 = {1'h0, _pmaPgLevelHomogeneous_T_63}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_65 = _pmaPgLevelHomogeneous_T_64 & 57'h1FFFFFFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_66 = _pmaPgLevelHomogeneous_T_65; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_67 = _pmaPgLevelHomogeneous_T_66 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _GEN_2 = {_pmaPgLevelHomogeneous_T_37[55:28], _pmaPgLevelHomogeneous_T_37[27:0] ^ 28'h8000000}; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_68; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_68 = _GEN_2; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_120; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_120 = _GEN_2; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_135; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_135 = _GEN_2; // @[Parameters.scala:137:31]
wire [56:0] _pmaPgLevelHomogeneous_T_69 = {1'h0, _pmaPgLevelHomogeneous_T_68}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_70 = _pmaPgLevelHomogeneous_T_69 & 57'h1FFFFFFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_71 = _pmaPgLevelHomogeneous_T_70; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_72 = _pmaPgLevelHomogeneous_T_71 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _pmaPgLevelHomogeneous_T_73 = {_pmaPgLevelHomogeneous_T_37[55:28], _pmaPgLevelHomogeneous_T_37[27:0] ^ 28'hC000000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_74 = {1'h0, _pmaPgLevelHomogeneous_T_73}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_75 = _pmaPgLevelHomogeneous_T_74 & 57'h1FFFFFFFC000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_76 = _pmaPgLevelHomogeneous_T_75; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_77 = _pmaPgLevelHomogeneous_T_76 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _pmaPgLevelHomogeneous_T_78 = {_pmaPgLevelHomogeneous_T_37[55:29], _pmaPgLevelHomogeneous_T_37[28:0] ^ 29'h10020000}; // @[PTW.scala:544:96]
wire [56:0] _pmaPgLevelHomogeneous_T_79 = {1'h0, _pmaPgLevelHomogeneous_T_78}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_80 = _pmaPgLevelHomogeneous_T_79 & 57'h1FFFFFFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_81 = _pmaPgLevelHomogeneous_T_80; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_82 = _pmaPgLevelHomogeneous_T_81 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [55:0] _GEN_3 = {_pmaPgLevelHomogeneous_T_37[55:32], _pmaPgLevelHomogeneous_T_37[31:0] ^ 32'h80000000}; // @[PTW.scala:544:96]
wire [55:0] _pmaPgLevelHomogeneous_T_83; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_83 = _GEN_3; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_125; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_125 = _GEN_3; // @[Parameters.scala:137:31]
wire [55:0] _pmaPgLevelHomogeneous_T_140; // @[Parameters.scala:137:31]
assign _pmaPgLevelHomogeneous_T_140 = _GEN_3; // @[Parameters.scala:137:31]
wire [56:0] _pmaPgLevelHomogeneous_T_84 = {1'h0, _pmaPgLevelHomogeneous_T_83}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_85 = _pmaPgLevelHomogeneous_T_84 & 57'h1FFFFFFF0000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_86 = _pmaPgLevelHomogeneous_T_85; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_87 = _pmaPgLevelHomogeneous_T_86 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_89 = _pmaPgLevelHomogeneous_T_88 | _pmaPgLevelHomogeneous_T_47; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_90 = _pmaPgLevelHomogeneous_T_89 | _pmaPgLevelHomogeneous_T_52; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_91 = _pmaPgLevelHomogeneous_T_90 | _pmaPgLevelHomogeneous_T_57; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_92 = _pmaPgLevelHomogeneous_T_91 | _pmaPgLevelHomogeneous_T_62; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_93 = _pmaPgLevelHomogeneous_T_92 | _pmaPgLevelHomogeneous_T_67; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_94 = _pmaPgLevelHomogeneous_T_93 | _pmaPgLevelHomogeneous_T_72; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_95 = _pmaPgLevelHomogeneous_T_94 | _pmaPgLevelHomogeneous_T_77; // @[TLBPermissions.scala:101:65]
wire _pmaPgLevelHomogeneous_T_96 = _pmaPgLevelHomogeneous_T_95 | _pmaPgLevelHomogeneous_T_82; // @[TLBPermissions.scala:101:65]
wire pmaPgLevelHomogeneous_2 = _pmaPgLevelHomogeneous_T_96 | _pmaPgLevelHomogeneous_T_87; // @[TLBPermissions.scala:101:65]
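// pmaPgLevelHomogeneous_2 appears to OR the containment tests above: the page is treated as
// PMA-homogeneous at this page level if it lies entirely inside one of the listed
// memory-map regions (TLBPermissions.scala:101).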
wire [56:0] _pmaPgLevelHomogeneous_T_99 = {1'h0, _pmaPgLevelHomogeneous_T_98}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_100 = _pmaPgLevelHomogeneous_T_99 & 57'h8A110000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_101 = _pmaPgLevelHomogeneous_T_100; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_102 = _pmaPgLevelHomogeneous_T_101 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_103 = _pmaPgLevelHomogeneous_T_102; // @[TLBPermissions.scala:87:66]
wire _pmaPgLevelHomogeneous_T_104 = ~_pmaPgLevelHomogeneous_T_103; // @[TLBPermissions.scala:87:{22,66}]
wire [56:0] _pmaPgLevelHomogeneous_T_106 = {1'h0, _pmaPgLevelHomogeneous_T_105}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_107 = _pmaPgLevelHomogeneous_T_106 & 57'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_108 = _pmaPgLevelHomogeneous_T_107; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_109 = _pmaPgLevelHomogeneous_T_108 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_130 = _pmaPgLevelHomogeneous_T_109; // @[TLBPermissions.scala:85:66]
wire [56:0] _pmaPgLevelHomogeneous_T_111 = {1'h0, _pmaPgLevelHomogeneous_T_110}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_112 = _pmaPgLevelHomogeneous_T_111 & 57'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_113 = _pmaPgLevelHomogeneous_T_112; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_114 = _pmaPgLevelHomogeneous_T_113 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [56:0] _pmaPgLevelHomogeneous_T_116 = {1'h0, _pmaPgLevelHomogeneous_T_115}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_117 = _pmaPgLevelHomogeneous_T_116 & 57'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_118 = _pmaPgLevelHomogeneous_T_117; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_119 = _pmaPgLevelHomogeneous_T_118 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [56:0] _pmaPgLevelHomogeneous_T_121 = {1'h0, _pmaPgLevelHomogeneous_T_120}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_122 = _pmaPgLevelHomogeneous_T_121 & 57'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_123 = _pmaPgLevelHomogeneous_T_122; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_124 = _pmaPgLevelHomogeneous_T_123 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire [56:0] _pmaPgLevelHomogeneous_T_126 = {1'h0, _pmaPgLevelHomogeneous_T_125}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_127 = _pmaPgLevelHomogeneous_T_126 & 57'h90000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_128 = _pmaPgLevelHomogeneous_T_127; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_129 = _pmaPgLevelHomogeneous_T_128 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_131 = _pmaPgLevelHomogeneous_T_130 | _pmaPgLevelHomogeneous_T_114; // @[TLBPermissions.scala:85:66]
wire _pmaPgLevelHomogeneous_T_132 = _pmaPgLevelHomogeneous_T_131 | _pmaPgLevelHomogeneous_T_119; // @[TLBPermissions.scala:85:66]
wire _pmaPgLevelHomogeneous_T_133 = _pmaPgLevelHomogeneous_T_132 | _pmaPgLevelHomogeneous_T_124; // @[TLBPermissions.scala:85:66]
wire _pmaPgLevelHomogeneous_T_134 = _pmaPgLevelHomogeneous_T_133 | _pmaPgLevelHomogeneous_T_129; // @[TLBPermissions.scala:85:66]
wire [56:0] _pmaPgLevelHomogeneous_T_136 = {1'h0, _pmaPgLevelHomogeneous_T_135}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_137 = _pmaPgLevelHomogeneous_T_136 & 57'h8E000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_138 = _pmaPgLevelHomogeneous_T_137; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_139 = _pmaPgLevelHomogeneous_T_138 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_145 = _pmaPgLevelHomogeneous_T_139; // @[TLBPermissions.scala:85:66]
wire [56:0] _pmaPgLevelHomogeneous_T_141 = {1'h0, _pmaPgLevelHomogeneous_T_140}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_142 = _pmaPgLevelHomogeneous_T_141 & 57'h80000000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_143 = _pmaPgLevelHomogeneous_T_142; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_144 = _pmaPgLevelHomogeneous_T_143 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_146 = _pmaPgLevelHomogeneous_T_145 | _pmaPgLevelHomogeneous_T_144; // @[TLBPermissions.scala:85:66]
wire [56:0] _pmaPgLevelHomogeneous_T_148 = {1'h0, _pmaPgLevelHomogeneous_T_147}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_149 = _pmaPgLevelHomogeneous_T_148 & 57'h8A110000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_150 = _pmaPgLevelHomogeneous_T_149; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_151 = _pmaPgLevelHomogeneous_T_150 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_152 = _pmaPgLevelHomogeneous_T_151; // @[TLBPermissions.scala:87:66]
wire _pmaPgLevelHomogeneous_T_153 = ~_pmaPgLevelHomogeneous_T_152; // @[TLBPermissions.scala:87:{22,66}]
wire [56:0] _pmaPgLevelHomogeneous_T_155 = {1'h0, _pmaPgLevelHomogeneous_T_154}; // @[Parameters.scala:137:{31,41}]
wire [56:0] _pmaPgLevelHomogeneous_T_156 = _pmaPgLevelHomogeneous_T_155 & 57'h8A110000; // @[Parameters.scala:137:{41,46}]
wire [56:0] _pmaPgLevelHomogeneous_T_157 = _pmaPgLevelHomogeneous_T_156; // @[Parameters.scala:137:46]
wire _pmaPgLevelHomogeneous_T_158 = _pmaPgLevelHomogeneous_T_157 == 57'h0; // @[Parameters.scala:137:{46,59}]
wire _pmaPgLevelHomogeneous_T_159 = _pmaPgLevelHomogeneous_T_158; // @[TLBPermissions.scala:87:66]
wire _pmaPgLevelHomogeneous_T_160 = ~_pmaPgLevelHomogeneous_T_159; // @[TLBPermissions.scala:87:{22,66}]
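// The _pmaPgLevelHomogeneous_T_98.._T_160 terms above evaluate containment against further
// region lists (TLBPermissions.scala:85/:87 annotations); their results are presumably
// consumed by attribute-homogeneity logic outside this excerpt. The lines below select the
// per-level PMA result according to count (the current page-table level).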
wire _pmaHomogeneous_T_1 = _pmaHomogeneous_T & pmaPgLevelHomogeneous_1; // @[package.scala:39:{76,86}]
wire _pmaHomogeneous_T_3 = _pmaHomogeneous_T_2 ? pmaPgLevelHomogeneous_2 : _pmaHomogeneous_T_1; // @[package.scala:39:{76,86}]
wire _pmaHomogeneous_T_4 = &count; // @[package.scala:39:86]
wire pmaHomogeneous = _pmaHomogeneous_T_4 ? pmaPgLevelHomogeneous_2 : _pmaHomogeneous_T_3; // @[package.scala:39:{76,86}]
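// PMP homogeneity (as the wire names suggest): each PMP register contributes a term requiring
// the page at the current level to be uniformly covered. NAPOT entries (cfg.a[1] set) use the
// mask-based check below; TOR entries use the beginsAfter*/endsBefore* bound comparisons;
// disabled entries pass trivially. The per-entry terms are ANDed together (PMP.scala:118, :138).
// Entry 0 (io_dpath_pmp_0_*) comes first.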
wire _pmpHomogeneous_T_1 = io_dpath_pmp_0_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T = io_dpath_pmp_0_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_1 = io_dpath_pmp_0_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_2 = io_dpath_pmp_0_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_4 = _pmpHomogeneous_maskHomogeneous_T_3 ? _pmpHomogeneous_maskHomogeneous_T_1 : _pmpHomogeneous_maskHomogeneous_T; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_6 = _pmpHomogeneous_maskHomogeneous_T_5 ? _pmpHomogeneous_maskHomogeneous_T_2 : _pmpHomogeneous_maskHomogeneous_T_4; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_7 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous = _pmpHomogeneous_maskHomogeneous_T_7 ? _pmpHomogeneous_maskHomogeneous_T_2 : _pmpHomogeneous_maskHomogeneous_T_6; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_4 = {io_dpath_pmp_0_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_2; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_2 = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_9; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_9 = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_16; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_16 = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_1; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_1 = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_5; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_5 = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_7; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_7 = _GEN_4; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_3 = ~_pmpHomogeneous_T_2; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_4 = {_pmpHomogeneous_T_3[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_5 = ~_pmpHomogeneous_T_4; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_6 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_5}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_7 = _pmpHomogeneous_T_6[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_8 = |_pmpHomogeneous_T_7; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_10 = ~_pmpHomogeneous_T_9; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_11 = {_pmpHomogeneous_T_10[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_12 = ~_pmpHomogeneous_T_11; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_13 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_12}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_14 = _pmpHomogeneous_T_13[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_15 = |_pmpHomogeneous_T_14; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_17 = ~_pmpHomogeneous_T_16; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_18 = {_pmpHomogeneous_T_17[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_19 = ~_pmpHomogeneous_T_18; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_20 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_19}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_21 = _pmpHomogeneous_T_20[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_22 = |_pmpHomogeneous_T_21; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_24 = _pmpHomogeneous_T_23 ? _pmpHomogeneous_T_15 : _pmpHomogeneous_T_8; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_26 = _pmpHomogeneous_T_25 ? _pmpHomogeneous_T_22 : _pmpHomogeneous_T_24; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_27 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_28 = _pmpHomogeneous_T_27 ? _pmpHomogeneous_T_22 : _pmpHomogeneous_T_26; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_29 = pmpHomogeneous_maskHomogeneous | _pmpHomogeneous_T_28; // @[package.scala:39:76]
wire _pmpHomogeneous_T_30 = io_dpath_pmp_0_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_31 = ~_pmpHomogeneous_T_30; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_1 = ~_pmpHomogeneous_beginsAfterUpper_T; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_2 = {_pmpHomogeneous_beginsAfterUpper_T_1[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_3 = ~_pmpHomogeneous_beginsAfterUpper_T_2; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_4 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_3}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper = ~_pmpHomogeneous_beginsAfterUpper_T_4; // @[PMP.scala:107:{28,32}]
wire _pmpHomogeneous_T_32 = pmpHomogeneous_beginsAfterUpper; // @[PMP.scala:107:28, :113:21]
wire [31:0] _pmpHomogeneous_pgMask_T_1 = _pmpHomogeneous_pgMask_T ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_3 = _pmpHomogeneous_pgMask_T_2 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_1; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_4 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask = _pmpHomogeneous_pgMask_T_4 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_3; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_5 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T = _GEN_5; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T = _GEN_5; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_2 = ~_pmpHomogeneous_endsBeforeUpper_T_1; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_3 = {_pmpHomogeneous_endsBeforeUpper_T_2[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_4 = ~_pmpHomogeneous_endsBeforeUpper_T_3; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_5 = _pmpHomogeneous_endsBeforeUpper_T_4 & pmpHomogeneous_pgMask; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper = _pmpHomogeneous_endsBeforeUpper_T < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_5}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_33 = pmpHomogeneous_endsBeforeUpper; // @[PMP.scala:111:40, :113:62]
wire _pmpHomogeneous_T_34 = _pmpHomogeneous_T_32 | _pmpHomogeneous_T_33; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_35 = _pmpHomogeneous_T_31 | _pmpHomogeneous_T_34; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_36 = _pmpHomogeneous_T_1 ? _pmpHomogeneous_T_29 : _pmpHomogeneous_T_35; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_37 = _pmpHomogeneous_T_36; // @[PMP.scala:118:8, :138:10]
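// PMP entry 1 (io_dpath_pmp_1_*): same per-page homogeneity check as entry 0 above.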
wire _pmpHomogeneous_T_38 = io_dpath_pmp_1_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_8 = io_dpath_pmp_1_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_9 = io_dpath_pmp_1_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_10 = io_dpath_pmp_1_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_12 = _pmpHomogeneous_maskHomogeneous_T_11 ? _pmpHomogeneous_maskHomogeneous_T_9 : _pmpHomogeneous_maskHomogeneous_T_8; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_14 = _pmpHomogeneous_maskHomogeneous_T_13 ? _pmpHomogeneous_maskHomogeneous_T_10 : _pmpHomogeneous_maskHomogeneous_T_12; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_15 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_1 = _pmpHomogeneous_maskHomogeneous_T_15 ? _pmpHomogeneous_maskHomogeneous_T_10 : _pmpHomogeneous_maskHomogeneous_T_14; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_6 = {io_dpath_pmp_1_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_39; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_39 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_46; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_46 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_53; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_53 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_5; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_5 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_7; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_7 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_10; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_10 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_13; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_13 = _GEN_6; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_40 = ~_pmpHomogeneous_T_39; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_41 = {_pmpHomogeneous_T_40[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_42 = ~_pmpHomogeneous_T_41; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_43 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_42}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_44 = _pmpHomogeneous_T_43[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_45 = |_pmpHomogeneous_T_44; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_47 = ~_pmpHomogeneous_T_46; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_48 = {_pmpHomogeneous_T_47[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_49 = ~_pmpHomogeneous_T_48; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_50 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_49}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_51 = _pmpHomogeneous_T_50[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_52 = |_pmpHomogeneous_T_51; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_54 = ~_pmpHomogeneous_T_53; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_55 = {_pmpHomogeneous_T_54[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_56 = ~_pmpHomogeneous_T_55; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_57 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_56}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_58 = _pmpHomogeneous_T_57[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_59 = |_pmpHomogeneous_T_58; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_61 = _pmpHomogeneous_T_60 ? _pmpHomogeneous_T_52 : _pmpHomogeneous_T_45; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_63 = _pmpHomogeneous_T_62 ? _pmpHomogeneous_T_59 : _pmpHomogeneous_T_61; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_64 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_65 = _pmpHomogeneous_T_64 ? _pmpHomogeneous_T_59 : _pmpHomogeneous_T_63; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_66 = pmpHomogeneous_maskHomogeneous_1 | _pmpHomogeneous_T_65; // @[package.scala:39:76]
wire _pmpHomogeneous_T_67 = io_dpath_pmp_1_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_68 = ~_pmpHomogeneous_T_67; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_6 = ~_pmpHomogeneous_beginsAfterLower_T_5; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_7 = {_pmpHomogeneous_beginsAfterLower_T_6[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_8 = ~_pmpHomogeneous_beginsAfterLower_T_7; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_9 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_8}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_1 = ~_pmpHomogeneous_beginsAfterLower_T_9; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_6 = ~_pmpHomogeneous_beginsAfterUpper_T_5; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_7 = {_pmpHomogeneous_beginsAfterUpper_T_6[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_8 = ~_pmpHomogeneous_beginsAfterUpper_T_7; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_9 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_8}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_1 = ~_pmpHomogeneous_beginsAfterUpper_T_9; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_6 = _pmpHomogeneous_pgMask_T_5 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_8 = _pmpHomogeneous_pgMask_T_7 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_6; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_9 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_1 = _pmpHomogeneous_pgMask_T_9 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_8; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_7 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_1}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_6; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_6 = _GEN_7; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_6; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_6 = _GEN_7; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_8 = ~_pmpHomogeneous_endsBeforeLower_T_7; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_9 = {_pmpHomogeneous_endsBeforeLower_T_8[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_10 = ~_pmpHomogeneous_endsBeforeLower_T_9; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_11 = _pmpHomogeneous_endsBeforeLower_T_10 & pmpHomogeneous_pgMask_1; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_1 = _pmpHomogeneous_endsBeforeLower_T_6 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_11}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_8 = ~_pmpHomogeneous_endsBeforeUpper_T_7; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_9 = {_pmpHomogeneous_endsBeforeUpper_T_8[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_10 = ~_pmpHomogeneous_endsBeforeUpper_T_9; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_11 = _pmpHomogeneous_endsBeforeUpper_T_10 & pmpHomogeneous_pgMask_1; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_1 = _pmpHomogeneous_endsBeforeUpper_T_6 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_11}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_69 = pmpHomogeneous_endsBeforeLower_1 | pmpHomogeneous_beginsAfterUpper_1; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_70 = pmpHomogeneous_beginsAfterLower_1 & pmpHomogeneous_endsBeforeUpper_1; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_71 = _pmpHomogeneous_T_69 | _pmpHomogeneous_T_70; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_72 = _pmpHomogeneous_T_68 | _pmpHomogeneous_T_71; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_73 = _pmpHomogeneous_T_38 ? _pmpHomogeneous_T_66 : _pmpHomogeneous_T_72; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_74 = _pmpHomogeneous_T_37 & _pmpHomogeneous_T_73; // @[PMP.scala:118:8, :138:10]
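// PMP entry 2 (io_dpath_pmp_2_*): same per-page homogeneity check as the entries above.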
wire _pmpHomogeneous_T_75 = io_dpath_pmp_2_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_16 = io_dpath_pmp_2_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_17 = io_dpath_pmp_2_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_18 = io_dpath_pmp_2_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_20 = _pmpHomogeneous_maskHomogeneous_T_19 ? _pmpHomogeneous_maskHomogeneous_T_17 : _pmpHomogeneous_maskHomogeneous_T_16; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_22 = _pmpHomogeneous_maskHomogeneous_T_21 ? _pmpHomogeneous_maskHomogeneous_T_18 : _pmpHomogeneous_maskHomogeneous_T_20; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_23 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_2 = _pmpHomogeneous_maskHomogeneous_T_23 ? _pmpHomogeneous_maskHomogeneous_T_18 : _pmpHomogeneous_maskHomogeneous_T_22; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_8 = {io_dpath_pmp_2_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_76; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_76 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_83; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_83 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_90; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_90 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_10; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_10 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_13; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_13 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_15; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_15 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_19; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_19 = _GEN_8; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_77 = ~_pmpHomogeneous_T_76; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_78 = {_pmpHomogeneous_T_77[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_79 = ~_pmpHomogeneous_T_78; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_80 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_79}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_81 = _pmpHomogeneous_T_80[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_82 = |_pmpHomogeneous_T_81; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_84 = ~_pmpHomogeneous_T_83; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_85 = {_pmpHomogeneous_T_84[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_86 = ~_pmpHomogeneous_T_85; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_87 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_86}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_88 = _pmpHomogeneous_T_87[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_89 = |_pmpHomogeneous_T_88; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_91 = ~_pmpHomogeneous_T_90; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_92 = {_pmpHomogeneous_T_91[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_93 = ~_pmpHomogeneous_T_92; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_94 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_93}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_95 = _pmpHomogeneous_T_94[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_96 = |_pmpHomogeneous_T_95; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_98 = _pmpHomogeneous_T_97 ? _pmpHomogeneous_T_89 : _pmpHomogeneous_T_82; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_100 = _pmpHomogeneous_T_99 ? _pmpHomogeneous_T_96 : _pmpHomogeneous_T_98; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_101 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_102 = _pmpHomogeneous_T_101 ? _pmpHomogeneous_T_96 : _pmpHomogeneous_T_100; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_103 = pmpHomogeneous_maskHomogeneous_2 | _pmpHomogeneous_T_102; // @[package.scala:39:76]
wire _pmpHomogeneous_T_104 = io_dpath_pmp_2_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_105 = ~_pmpHomogeneous_T_104; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_11 = ~_pmpHomogeneous_beginsAfterLower_T_10; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_12 = {_pmpHomogeneous_beginsAfterLower_T_11[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_13 = ~_pmpHomogeneous_beginsAfterLower_T_12; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_14 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_13}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_2 = ~_pmpHomogeneous_beginsAfterLower_T_14; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_11 = ~_pmpHomogeneous_beginsAfterUpper_T_10; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_12 = {_pmpHomogeneous_beginsAfterUpper_T_11[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_13 = ~_pmpHomogeneous_beginsAfterUpper_T_12; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_14 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_13}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_2 = ~_pmpHomogeneous_beginsAfterUpper_T_14; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_11 = _pmpHomogeneous_pgMask_T_10 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_13 = _pmpHomogeneous_pgMask_T_12 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_11; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_14 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_2 = _pmpHomogeneous_pgMask_T_14 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_13; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_9 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_2}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_12; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_12 = _GEN_9; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_12; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_12 = _GEN_9; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_14 = ~_pmpHomogeneous_endsBeforeLower_T_13; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_15 = {_pmpHomogeneous_endsBeforeLower_T_14[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_16 = ~_pmpHomogeneous_endsBeforeLower_T_15; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_17 = _pmpHomogeneous_endsBeforeLower_T_16 & pmpHomogeneous_pgMask_2; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_2 = _pmpHomogeneous_endsBeforeLower_T_12 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_17}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_14 = ~_pmpHomogeneous_endsBeforeUpper_T_13; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_15 = {_pmpHomogeneous_endsBeforeUpper_T_14[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_16 = ~_pmpHomogeneous_endsBeforeUpper_T_15; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_17 = _pmpHomogeneous_endsBeforeUpper_T_16 & pmpHomogeneous_pgMask_2; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_2 = _pmpHomogeneous_endsBeforeUpper_T_12 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_17}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_106 = pmpHomogeneous_endsBeforeLower_2 | pmpHomogeneous_beginsAfterUpper_2; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_107 = pmpHomogeneous_beginsAfterLower_2 & pmpHomogeneous_endsBeforeUpper_2; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_108 = _pmpHomogeneous_T_106 | _pmpHomogeneous_T_107; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_109 = _pmpHomogeneous_T_105 | _pmpHomogeneous_T_108; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_110 = _pmpHomogeneous_T_75 ? _pmpHomogeneous_T_103 : _pmpHomogeneous_T_109; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_111 = _pmpHomogeneous_T_74 & _pmpHomogeneous_T_110; // @[PMP.scala:118:8, :138:10]
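// PMP entry 3 (io_dpath_pmp_3_*): same per-page homogeneity check as the entries above.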
wire _pmpHomogeneous_T_112 = io_dpath_pmp_3_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_24 = io_dpath_pmp_3_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_25 = io_dpath_pmp_3_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_26 = io_dpath_pmp_3_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_28 = _pmpHomogeneous_maskHomogeneous_T_27 ? _pmpHomogeneous_maskHomogeneous_T_25 : _pmpHomogeneous_maskHomogeneous_T_24; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_30 = _pmpHomogeneous_maskHomogeneous_T_29 ? _pmpHomogeneous_maskHomogeneous_T_26 : _pmpHomogeneous_maskHomogeneous_T_28; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_31 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_3 = _pmpHomogeneous_maskHomogeneous_T_31 ? _pmpHomogeneous_maskHomogeneous_T_26 : _pmpHomogeneous_maskHomogeneous_T_30; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_10 = {io_dpath_pmp_3_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_113; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_113 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_120; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_120 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_127; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_127 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_15; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_15 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_19; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_19 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_20; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_20 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_25; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_25 = _GEN_10; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_114 = ~_pmpHomogeneous_T_113; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_115 = {_pmpHomogeneous_T_114[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_116 = ~_pmpHomogeneous_T_115; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_117 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_116}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_118 = _pmpHomogeneous_T_117[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_119 = |_pmpHomogeneous_T_118; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_121 = ~_pmpHomogeneous_T_120; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_122 = {_pmpHomogeneous_T_121[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_123 = ~_pmpHomogeneous_T_122; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_124 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_123}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_125 = _pmpHomogeneous_T_124[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_126 = |_pmpHomogeneous_T_125; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_128 = ~_pmpHomogeneous_T_127; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_129 = {_pmpHomogeneous_T_128[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_130 = ~_pmpHomogeneous_T_129; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_131 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_130}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_132 = _pmpHomogeneous_T_131[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_133 = |_pmpHomogeneous_T_132; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_135 = _pmpHomogeneous_T_134 ? _pmpHomogeneous_T_126 : _pmpHomogeneous_T_119; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_137 = _pmpHomogeneous_T_136 ? _pmpHomogeneous_T_133 : _pmpHomogeneous_T_135; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_138 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_139 = _pmpHomogeneous_T_138 ? _pmpHomogeneous_T_133 : _pmpHomogeneous_T_137; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_140 = pmpHomogeneous_maskHomogeneous_3 | _pmpHomogeneous_T_139; // @[package.scala:39:76]
wire _pmpHomogeneous_T_141 = io_dpath_pmp_3_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_142 = ~_pmpHomogeneous_T_141; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_16 = ~_pmpHomogeneous_beginsAfterLower_T_15; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_17 = {_pmpHomogeneous_beginsAfterLower_T_16[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_18 = ~_pmpHomogeneous_beginsAfterLower_T_17; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_19 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_18}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_3 = ~_pmpHomogeneous_beginsAfterLower_T_19; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_16 = ~_pmpHomogeneous_beginsAfterUpper_T_15; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_17 = {_pmpHomogeneous_beginsAfterUpper_T_16[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_18 = ~_pmpHomogeneous_beginsAfterUpper_T_17; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_19 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_18}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_3 = ~_pmpHomogeneous_beginsAfterUpper_T_19; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_16 = _pmpHomogeneous_pgMask_T_15 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_18 = _pmpHomogeneous_pgMask_T_17 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_16; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_19 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_3 = _pmpHomogeneous_pgMask_T_19 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_18; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_11 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_3}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_18; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_18 = _GEN_11; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_18; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_18 = _GEN_11; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_20 = ~_pmpHomogeneous_endsBeforeLower_T_19; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_21 = {_pmpHomogeneous_endsBeforeLower_T_20[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_22 = ~_pmpHomogeneous_endsBeforeLower_T_21; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_23 = _pmpHomogeneous_endsBeforeLower_T_22 & pmpHomogeneous_pgMask_3; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_3 = _pmpHomogeneous_endsBeforeLower_T_18 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_23}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_20 = ~_pmpHomogeneous_endsBeforeUpper_T_19; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_21 = {_pmpHomogeneous_endsBeforeUpper_T_20[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_22 = ~_pmpHomogeneous_endsBeforeUpper_T_21; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_23 = _pmpHomogeneous_endsBeforeUpper_T_22 & pmpHomogeneous_pgMask_3; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_3 = _pmpHomogeneous_endsBeforeUpper_T_18 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_23}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_143 = pmpHomogeneous_endsBeforeLower_3 | pmpHomogeneous_beginsAfterUpper_3; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_144 = pmpHomogeneous_beginsAfterLower_3 & pmpHomogeneous_endsBeforeUpper_3; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_145 = _pmpHomogeneous_T_143 | _pmpHomogeneous_T_144; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_146 = _pmpHomogeneous_T_142 | _pmpHomogeneous_T_145; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_147 = _pmpHomogeneous_T_112 ? _pmpHomogeneous_T_140 : _pmpHomogeneous_T_146; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_148 = _pmpHomogeneous_T_111 & _pmpHomogeneous_T_147; // @[PMP.scala:118:8, :138:10]
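// PMP entry 4 (io_dpath_pmp_4_*): same per-page homogeneity check as the entries above.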
wire _pmpHomogeneous_T_149 = io_dpath_pmp_4_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_32 = io_dpath_pmp_4_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_33 = io_dpath_pmp_4_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_34 = io_dpath_pmp_4_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_36 = _pmpHomogeneous_maskHomogeneous_T_35 ? _pmpHomogeneous_maskHomogeneous_T_33 : _pmpHomogeneous_maskHomogeneous_T_32; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_38 = _pmpHomogeneous_maskHomogeneous_T_37 ? _pmpHomogeneous_maskHomogeneous_T_34 : _pmpHomogeneous_maskHomogeneous_T_36; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_39 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_4 = _pmpHomogeneous_maskHomogeneous_T_39 ? _pmpHomogeneous_maskHomogeneous_T_34 : _pmpHomogeneous_maskHomogeneous_T_38; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_12 = {io_dpath_pmp_4_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_150; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_150 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_157; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_157 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_164; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_164 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_20; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_20 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_25; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_25 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_25; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_25 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_31; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_31 = _GEN_12; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_151 = ~_pmpHomogeneous_T_150; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_152 = {_pmpHomogeneous_T_151[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_153 = ~_pmpHomogeneous_T_152; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_154 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_153}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_155 = _pmpHomogeneous_T_154[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_156 = |_pmpHomogeneous_T_155; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_158 = ~_pmpHomogeneous_T_157; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_159 = {_pmpHomogeneous_T_158[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_160 = ~_pmpHomogeneous_T_159; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_161 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_160}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_162 = _pmpHomogeneous_T_161[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_163 = |_pmpHomogeneous_T_162; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_165 = ~_pmpHomogeneous_T_164; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_166 = {_pmpHomogeneous_T_165[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_167 = ~_pmpHomogeneous_T_166; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_168 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_167}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_169 = _pmpHomogeneous_T_168[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_170 = |_pmpHomogeneous_T_169; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_172 = _pmpHomogeneous_T_171 ? _pmpHomogeneous_T_163 : _pmpHomogeneous_T_156; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_174 = _pmpHomogeneous_T_173 ? _pmpHomogeneous_T_170 : _pmpHomogeneous_T_172; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_175 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_176 = _pmpHomogeneous_T_175 ? _pmpHomogeneous_T_170 : _pmpHomogeneous_T_174; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_177 = pmpHomogeneous_maskHomogeneous_4 | _pmpHomogeneous_T_176; // @[package.scala:39:76]
wire _pmpHomogeneous_T_178 = io_dpath_pmp_4_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_179 = ~_pmpHomogeneous_T_178; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_21 = ~_pmpHomogeneous_beginsAfterLower_T_20; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_22 = {_pmpHomogeneous_beginsAfterLower_T_21[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_23 = ~_pmpHomogeneous_beginsAfterLower_T_22; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_24 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_23}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_4 = ~_pmpHomogeneous_beginsAfterLower_T_24; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_21 = ~_pmpHomogeneous_beginsAfterUpper_T_20; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_22 = {_pmpHomogeneous_beginsAfterUpper_T_21[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_23 = ~_pmpHomogeneous_beginsAfterUpper_T_22; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_24 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_23}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_4 = ~_pmpHomogeneous_beginsAfterUpper_T_24; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_21 = _pmpHomogeneous_pgMask_T_20 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_23 = _pmpHomogeneous_pgMask_T_22 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_21; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_24 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_4 = _pmpHomogeneous_pgMask_T_24 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_23; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_13 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_4}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_24; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_24 = _GEN_13; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_24; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_24 = _GEN_13; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_26 = ~_pmpHomogeneous_endsBeforeLower_T_25; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_27 = {_pmpHomogeneous_endsBeforeLower_T_26[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_28 = ~_pmpHomogeneous_endsBeforeLower_T_27; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_29 = _pmpHomogeneous_endsBeforeLower_T_28 & pmpHomogeneous_pgMask_4; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_4 = _pmpHomogeneous_endsBeforeLower_T_24 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_29}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_26 = ~_pmpHomogeneous_endsBeforeUpper_T_25; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_27 = {_pmpHomogeneous_endsBeforeUpper_T_26[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_28 = ~_pmpHomogeneous_endsBeforeUpper_T_27; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_29 = _pmpHomogeneous_endsBeforeUpper_T_28 & pmpHomogeneous_pgMask_4; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_4 = _pmpHomogeneous_endsBeforeUpper_T_24 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_29}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_180 = pmpHomogeneous_endsBeforeLower_4 | pmpHomogeneous_beginsAfterUpper_4; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_181 = pmpHomogeneous_beginsAfterLower_4 & pmpHomogeneous_endsBeforeUpper_4; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_182 = _pmpHomogeneous_T_180 | _pmpHomogeneous_T_181; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_183 = _pmpHomogeneous_T_179 | _pmpHomogeneous_T_182; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_184 = _pmpHomogeneous_T_149 ? _pmpHomogeneous_T_177 : _pmpHomogeneous_T_183; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_185 = _pmpHomogeneous_T_148 & _pmpHomogeneous_T_184; // @[PMP.scala:118:8, :138:10]
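// PMP entry 5 (io_dpath_pmp_5_*): same per-page homogeneity check as the entries above.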
wire _pmpHomogeneous_T_186 = io_dpath_pmp_5_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_40 = io_dpath_pmp_5_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_41 = io_dpath_pmp_5_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_42 = io_dpath_pmp_5_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_44 = _pmpHomogeneous_maskHomogeneous_T_43 ? _pmpHomogeneous_maskHomogeneous_T_41 : _pmpHomogeneous_maskHomogeneous_T_40; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_46 = _pmpHomogeneous_maskHomogeneous_T_45 ? _pmpHomogeneous_maskHomogeneous_T_42 : _pmpHomogeneous_maskHomogeneous_T_44; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_47 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_5 = _pmpHomogeneous_maskHomogeneous_T_47 ? _pmpHomogeneous_maskHomogeneous_T_42 : _pmpHomogeneous_maskHomogeneous_T_46; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_14 = {io_dpath_pmp_5_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_187; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_187 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_194; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_194 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_201; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_201 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_25; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_25 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_31; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_31 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_30; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_30 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_37; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_37 = _GEN_14; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_188 = ~_pmpHomogeneous_T_187; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_189 = {_pmpHomogeneous_T_188[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_190 = ~_pmpHomogeneous_T_189; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_191 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_190}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_192 = _pmpHomogeneous_T_191[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_193 = |_pmpHomogeneous_T_192; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_195 = ~_pmpHomogeneous_T_194; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_196 = {_pmpHomogeneous_T_195[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_197 = ~_pmpHomogeneous_T_196; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_198 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_197}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_199 = _pmpHomogeneous_T_198[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_200 = |_pmpHomogeneous_T_199; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_202 = ~_pmpHomogeneous_T_201; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_203 = {_pmpHomogeneous_T_202[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_204 = ~_pmpHomogeneous_T_203; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_205 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_204}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_206 = _pmpHomogeneous_T_205[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_207 = |_pmpHomogeneous_T_206; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_209 = _pmpHomogeneous_T_208 ? _pmpHomogeneous_T_200 : _pmpHomogeneous_T_193; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_211 = _pmpHomogeneous_T_210 ? _pmpHomogeneous_T_207 : _pmpHomogeneous_T_209; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_212 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_213 = _pmpHomogeneous_T_212 ? _pmpHomogeneous_T_207 : _pmpHomogeneous_T_211; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_214 = pmpHomogeneous_maskHomogeneous_5 | _pmpHomogeneous_T_213; // @[package.scala:39:76]
wire _pmpHomogeneous_T_215 = io_dpath_pmp_5_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_216 = ~_pmpHomogeneous_T_215; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_26 = ~_pmpHomogeneous_beginsAfterLower_T_25; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_27 = {_pmpHomogeneous_beginsAfterLower_T_26[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_28 = ~_pmpHomogeneous_beginsAfterLower_T_27; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_29 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_28}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_5 = ~_pmpHomogeneous_beginsAfterLower_T_29; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_26 = ~_pmpHomogeneous_beginsAfterUpper_T_25; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_27 = {_pmpHomogeneous_beginsAfterUpper_T_26[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_28 = ~_pmpHomogeneous_beginsAfterUpper_T_27; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_29 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_28}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_5 = ~_pmpHomogeneous_beginsAfterUpper_T_29; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_26 = _pmpHomogeneous_pgMask_T_25 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_28 = _pmpHomogeneous_pgMask_T_27 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_26; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_29 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_5 = _pmpHomogeneous_pgMask_T_29 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_28; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_15 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_5}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_30; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_30 = _GEN_15; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_30; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_30 = _GEN_15; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_32 = ~_pmpHomogeneous_endsBeforeLower_T_31; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_33 = {_pmpHomogeneous_endsBeforeLower_T_32[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_34 = ~_pmpHomogeneous_endsBeforeLower_T_33; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_35 = _pmpHomogeneous_endsBeforeLower_T_34 & pmpHomogeneous_pgMask_5; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_5 = _pmpHomogeneous_endsBeforeLower_T_30 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_35}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_32 = ~_pmpHomogeneous_endsBeforeUpper_T_31; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_33 = {_pmpHomogeneous_endsBeforeUpper_T_32[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_34 = ~_pmpHomogeneous_endsBeforeUpper_T_33; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_35 = _pmpHomogeneous_endsBeforeUpper_T_34 & pmpHomogeneous_pgMask_5; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_5 = _pmpHomogeneous_endsBeforeUpper_T_30 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_35}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_217 = pmpHomogeneous_endsBeforeLower_5 | pmpHomogeneous_beginsAfterUpper_5; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_218 = pmpHomogeneous_beginsAfterLower_5 & pmpHomogeneous_endsBeforeUpper_5; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_219 = _pmpHomogeneous_T_217 | _pmpHomogeneous_T_218; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_220 = _pmpHomogeneous_T_216 | _pmpHomogeneous_T_219; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_221 = _pmpHomogeneous_T_186 ? _pmpHomogeneous_T_214 : _pmpHomogeneous_T_220; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_222 = _pmpHomogeneous_T_185 & _pmpHomogeneous_T_221; // @[PMP.scala:118:8, :138:10]
wire _pmpHomogeneous_T_223 = io_dpath_pmp_6_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_48 = io_dpath_pmp_6_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_49 = io_dpath_pmp_6_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_50 = io_dpath_pmp_6_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_52 = _pmpHomogeneous_maskHomogeneous_T_51 ? _pmpHomogeneous_maskHomogeneous_T_49 : _pmpHomogeneous_maskHomogeneous_T_48; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_54 = _pmpHomogeneous_maskHomogeneous_T_53 ? _pmpHomogeneous_maskHomogeneous_T_50 : _pmpHomogeneous_maskHomogeneous_T_52; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_55 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_6 = _pmpHomogeneous_maskHomogeneous_T_55 ? _pmpHomogeneous_maskHomogeneous_T_50 : _pmpHomogeneous_maskHomogeneous_T_54; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_16 = {io_dpath_pmp_6_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_224; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_224 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_231; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_231 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_238; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_238 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_30; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_30 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_37; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_37 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_35; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterLower_T_35 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_43; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeLower_T_43 = _GEN_16; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_225 = ~_pmpHomogeneous_T_224; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_226 = {_pmpHomogeneous_T_225[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_227 = ~_pmpHomogeneous_T_226; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_228 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_227}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_229 = _pmpHomogeneous_T_228[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_230 = |_pmpHomogeneous_T_229; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_232 = ~_pmpHomogeneous_T_231; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_233 = {_pmpHomogeneous_T_232[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_234 = ~_pmpHomogeneous_T_233; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_235 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_234}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_236 = _pmpHomogeneous_T_235[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_237 = |_pmpHomogeneous_T_236; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_239 = ~_pmpHomogeneous_T_238; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_240 = {_pmpHomogeneous_T_239[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_241 = ~_pmpHomogeneous_T_240; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_242 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_241}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_243 = _pmpHomogeneous_T_242[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_244 = |_pmpHomogeneous_T_243; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_246 = _pmpHomogeneous_T_245 ? _pmpHomogeneous_T_237 : _pmpHomogeneous_T_230; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_248 = _pmpHomogeneous_T_247 ? _pmpHomogeneous_T_244 : _pmpHomogeneous_T_246; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_249 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_250 = _pmpHomogeneous_T_249 ? _pmpHomogeneous_T_244 : _pmpHomogeneous_T_248; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_251 = pmpHomogeneous_maskHomogeneous_6 | _pmpHomogeneous_T_250; // @[package.scala:39:76]
wire _pmpHomogeneous_T_252 = io_dpath_pmp_6_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_253 = ~_pmpHomogeneous_T_252; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_31 = ~_pmpHomogeneous_beginsAfterLower_T_30; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_32 = {_pmpHomogeneous_beginsAfterLower_T_31[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_33 = ~_pmpHomogeneous_beginsAfterLower_T_32; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_34 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_33}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_6 = ~_pmpHomogeneous_beginsAfterLower_T_34; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_31 = ~_pmpHomogeneous_beginsAfterUpper_T_30; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_32 = {_pmpHomogeneous_beginsAfterUpper_T_31[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_33 = ~_pmpHomogeneous_beginsAfterUpper_T_32; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_34 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_33}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_6 = ~_pmpHomogeneous_beginsAfterUpper_T_34; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_31 = _pmpHomogeneous_pgMask_T_30 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_33 = _pmpHomogeneous_pgMask_T_32 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_31; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_34 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_6 = _pmpHomogeneous_pgMask_T_34 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_33; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_17 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_6}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_36; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_36 = _GEN_17; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_36; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_36 = _GEN_17; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_38 = ~_pmpHomogeneous_endsBeforeLower_T_37; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_39 = {_pmpHomogeneous_endsBeforeLower_T_38[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_40 = ~_pmpHomogeneous_endsBeforeLower_T_39; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_41 = _pmpHomogeneous_endsBeforeLower_T_40 & pmpHomogeneous_pgMask_6; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_6 = _pmpHomogeneous_endsBeforeLower_T_36 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_41}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_38 = ~_pmpHomogeneous_endsBeforeUpper_T_37; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_39 = {_pmpHomogeneous_endsBeforeUpper_T_38[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_40 = ~_pmpHomogeneous_endsBeforeUpper_T_39; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_41 = _pmpHomogeneous_endsBeforeUpper_T_40 & pmpHomogeneous_pgMask_6; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_6 = _pmpHomogeneous_endsBeforeUpper_T_36 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_41}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_254 = pmpHomogeneous_endsBeforeLower_6 | pmpHomogeneous_beginsAfterUpper_6; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_255 = pmpHomogeneous_beginsAfterLower_6 & pmpHomogeneous_endsBeforeUpper_6; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_256 = _pmpHomogeneous_T_254 | _pmpHomogeneous_T_255; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_257 = _pmpHomogeneous_T_253 | _pmpHomogeneous_T_256; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_258 = _pmpHomogeneous_T_223 ? _pmpHomogeneous_T_251 : _pmpHomogeneous_T_257; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire _pmpHomogeneous_T_259 = _pmpHomogeneous_T_222 & _pmpHomogeneous_T_258; // @[PMP.scala:118:8, :138:10]
wire _pmpHomogeneous_T_260 = io_dpath_pmp_7_cfg_a_0[1]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_56 = io_dpath_pmp_7_mask_0[29]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_57 = io_dpath_pmp_7_mask_0[20]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_58 = io_dpath_pmp_7_mask_0[11]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_maskHomogeneous_T_60 = _pmpHomogeneous_maskHomogeneous_T_59 ? _pmpHomogeneous_maskHomogeneous_T_57 : _pmpHomogeneous_maskHomogeneous_T_56; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_62 = _pmpHomogeneous_maskHomogeneous_T_61 ? _pmpHomogeneous_maskHomogeneous_T_58 : _pmpHomogeneous_maskHomogeneous_T_60; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_maskHomogeneous_T_63 = &count; // @[package.scala:39:86]
wire pmpHomogeneous_maskHomogeneous_7 = _pmpHomogeneous_maskHomogeneous_T_63 ? _pmpHomogeneous_maskHomogeneous_T_58 : _pmpHomogeneous_maskHomogeneous_T_62; // @[package.scala:39:{76,86}]
wire [31:0] _GEN_18 = {io_dpath_pmp_7_addr_0, 2'h0}; // @[PTW.scala:219:7]
wire [31:0] _pmpHomogeneous_T_261; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_261 = _GEN_18; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_268; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_268 = _GEN_18; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_275; // @[PMP.scala:60:36]
assign _pmpHomogeneous_T_275 = _GEN_18; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_35; // @[PMP.scala:60:36]
assign _pmpHomogeneous_beginsAfterUpper_T_35 = _GEN_18; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_43; // @[PMP.scala:60:36]
assign _pmpHomogeneous_endsBeforeUpper_T_43 = _GEN_18; // @[PMP.scala:60:36]
wire [31:0] _pmpHomogeneous_T_262 = ~_pmpHomogeneous_T_261; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_263 = {_pmpHomogeneous_T_262[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_264 = ~_pmpHomogeneous_T_263; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_265 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_264}; // @[PTW.scala:548:80]
wire [25:0] _pmpHomogeneous_T_266 = _pmpHomogeneous_T_265[55:30]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_267 = |_pmpHomogeneous_T_266; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_269 = ~_pmpHomogeneous_T_268; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_270 = {_pmpHomogeneous_T_269[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_271 = ~_pmpHomogeneous_T_270; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_272 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_271}; // @[PTW.scala:548:80]
wire [34:0] _pmpHomogeneous_T_273 = _pmpHomogeneous_T_272[55:21]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_274 = |_pmpHomogeneous_T_273; // @[PMP.scala:98:{66,78}]
wire [31:0] _pmpHomogeneous_T_276 = ~_pmpHomogeneous_T_275; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_T_277 = {_pmpHomogeneous_T_276[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_T_278 = ~_pmpHomogeneous_T_277; // @[PMP.scala:60:{27,48}]
wire [55:0] _pmpHomogeneous_T_279 = {_pmpHomogeneous_T[55:32], _pmpHomogeneous_T[31:0] ^ _pmpHomogeneous_T_278}; // @[PTW.scala:548:80]
wire [43:0] _pmpHomogeneous_T_280 = _pmpHomogeneous_T_279[55:12]; // @[PMP.scala:98:{53,66}]
wire _pmpHomogeneous_T_281 = |_pmpHomogeneous_T_280; // @[PMP.scala:98:{66,78}]
wire _pmpHomogeneous_T_283 = _pmpHomogeneous_T_282 ? _pmpHomogeneous_T_274 : _pmpHomogeneous_T_267; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_285 = _pmpHomogeneous_T_284 ? _pmpHomogeneous_T_281 : _pmpHomogeneous_T_283; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_286 = &count; // @[package.scala:39:86]
wire _pmpHomogeneous_T_287 = _pmpHomogeneous_T_286 ? _pmpHomogeneous_T_281 : _pmpHomogeneous_T_285; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_T_288 = pmpHomogeneous_maskHomogeneous_7 | _pmpHomogeneous_T_287; // @[package.scala:39:76]
wire _pmpHomogeneous_T_289 = io_dpath_pmp_7_cfg_a_0[0]; // @[PTW.scala:219:7]
wire _pmpHomogeneous_T_290 = ~_pmpHomogeneous_T_289; // @[PMP.scala:46:26, :118:45]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_36 = ~_pmpHomogeneous_beginsAfterLower_T_35; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_37 = {_pmpHomogeneous_beginsAfterLower_T_36[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterLower_T_38 = ~_pmpHomogeneous_beginsAfterLower_T_37; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterLower_T_39 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterLower_T_38}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterLower_7 = ~_pmpHomogeneous_beginsAfterLower_T_39; // @[PMP.scala:106:{28,32}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_36 = ~_pmpHomogeneous_beginsAfterUpper_T_35; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_37 = {_pmpHomogeneous_beginsAfterUpper_T_36[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_beginsAfterUpper_T_38 = ~_pmpHomogeneous_beginsAfterUpper_T_37; // @[PMP.scala:60:{27,48}]
wire _pmpHomogeneous_beginsAfterUpper_T_39 = _pmpHomogeneous_T < {24'h0, _pmpHomogeneous_beginsAfterUpper_T_38}; // @[PTW.scala:548:80]
wire pmpHomogeneous_beginsAfterUpper_7 = ~_pmpHomogeneous_beginsAfterUpper_T_39; // @[PMP.scala:107:{28,32}]
wire [31:0] _pmpHomogeneous_pgMask_T_36 = _pmpHomogeneous_pgMask_T_35 ? 32'hFFE00000 : 32'hC0000000; // @[package.scala:39:{76,86}]
wire [31:0] _pmpHomogeneous_pgMask_T_38 = _pmpHomogeneous_pgMask_T_37 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_36; // @[package.scala:39:{76,86}]
wire _pmpHomogeneous_pgMask_T_39 = &count; // @[package.scala:39:86]
wire [31:0] pmpHomogeneous_pgMask_7 = _pmpHomogeneous_pgMask_T_39 ? 32'hFFFFF000 : _pmpHomogeneous_pgMask_T_38; // @[package.scala:39:{76,86}]
wire [55:0] _GEN_19 = {24'h0, _pmpHomogeneous_T[31:0] & pmpHomogeneous_pgMask_7}; // @[package.scala:39:76]
wire [55:0] _pmpHomogeneous_endsBeforeLower_T_42; // @[PMP.scala:110:30]
assign _pmpHomogeneous_endsBeforeLower_T_42 = _GEN_19; // @[PMP.scala:110:30]
wire [55:0] _pmpHomogeneous_endsBeforeUpper_T_42; // @[PMP.scala:111:30]
assign _pmpHomogeneous_endsBeforeUpper_T_42 = _GEN_19; // @[PMP.scala:110:30, :111:30]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_44 = ~_pmpHomogeneous_endsBeforeLower_T_43; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_45 = {_pmpHomogeneous_endsBeforeLower_T_44[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_46 = ~_pmpHomogeneous_endsBeforeLower_T_45; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeLower_T_47 = _pmpHomogeneous_endsBeforeLower_T_46 & pmpHomogeneous_pgMask_7; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeLower_7 = _pmpHomogeneous_endsBeforeLower_T_42 < {24'h0, _pmpHomogeneous_endsBeforeLower_T_47}; // @[PMP.scala:110:{30,40,58}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_44 = ~_pmpHomogeneous_endsBeforeUpper_T_43; // @[PMP.scala:60:{29,36}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_45 = {_pmpHomogeneous_endsBeforeUpper_T_44[31:2], 2'h3}; // @[PMP.scala:60:{29,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_46 = ~_pmpHomogeneous_endsBeforeUpper_T_45; // @[PMP.scala:60:{27,48}]
wire [31:0] _pmpHomogeneous_endsBeforeUpper_T_47 = _pmpHomogeneous_endsBeforeUpper_T_46 & pmpHomogeneous_pgMask_7; // @[package.scala:39:76]
wire pmpHomogeneous_endsBeforeUpper_7 = _pmpHomogeneous_endsBeforeUpper_T_42 < {24'h0, _pmpHomogeneous_endsBeforeUpper_T_47}; // @[PMP.scala:111:{30,40,53}]
wire _pmpHomogeneous_T_291 = pmpHomogeneous_endsBeforeLower_7 | pmpHomogeneous_beginsAfterUpper_7; // @[PMP.scala:107:28, :110:40, :113:21]
wire _pmpHomogeneous_T_292 = pmpHomogeneous_beginsAfterLower_7 & pmpHomogeneous_endsBeforeUpper_7; // @[PMP.scala:106:28, :111:40, :113:62]
wire _pmpHomogeneous_T_293 = _pmpHomogeneous_T_291 | _pmpHomogeneous_T_292; // @[PMP.scala:113:{21,41,62}]
wire _pmpHomogeneous_T_294 = _pmpHomogeneous_T_290 | _pmpHomogeneous_T_293; // @[PMP.scala:113:41, :118:{45,58}]
wire _pmpHomogeneous_T_295 = _pmpHomogeneous_T_260 ? _pmpHomogeneous_T_288 : _pmpHomogeneous_T_294; // @[PMP.scala:45:20, :98:21, :118:{8,58}]
wire pmpHomogeneous = _pmpHomogeneous_T_259 & _pmpHomogeneous_T_295; // @[PMP.scala:118:8, :138:10]
wire homogeneous = pmaHomogeneous & pmpHomogeneous; // @[package.scala:39:76]
assign _io_requestor_0_resp_bits_homogeneous_T = homogeneous; // @[PTW.scala:549:36, :562:58]
assign _io_requestor_1_resp_bits_homogeneous_T = homogeneous; // @[PTW.scala:549:36, :562:58]
assign io_requestor_0_resp_bits_homogeneous_0 = _io_requestor_0_resp_bits_homogeneous_T; // @[PTW.scala:219:7, :562:58]
wire _io_requestor_0_resp_bits_gpa_bits_T = ~stage2_final; // @[PTW.scala:283:25, :357:107, :566:15]
wire _io_requestor_0_resp_bits_gpa_bits_T_1 = ~r_req_vstage1; // @[PTW.scala:270:18, :566:32]
wire _io_requestor_0_resp_bits_gpa_bits_T_2 = _io_requestor_0_resp_bits_gpa_bits_T | _io_requestor_0_resp_bits_gpa_bits_T_1; // @[PTW.scala:566:{15,29,32}]
wire _T_171 = aux_count == 2'h2; // @[PTW.scala:278:22, :566:60]
wire _io_requestor_0_resp_bits_gpa_bits_T_3; // @[PTW.scala:566:60]
assign _io_requestor_0_resp_bits_gpa_bits_T_3 = _T_171; // @[PTW.scala:566:60]
wire _io_requestor_1_resp_bits_gpa_bits_T_3; // @[PTW.scala:566:60]
assign _io_requestor_1_resp_bits_gpa_bits_T_3 = _T_171; // @[PTW.scala:566:60]
wire _gpa_pgoff_T; // @[PTW.scala:615:36]
assign _gpa_pgoff_T = _T_171; // @[PTW.scala:566:60, :615:36]
wire _l2_refill_T_7; // @[PTW.scala:715:40]
assign _l2_refill_T_7 = _T_171; // @[PTW.scala:566:60, :715:40]
wire _io_requestor_0_resp_bits_gpa_bits_T_4 = _io_requestor_0_resp_bits_gpa_bits_T_2 | _io_requestor_0_resp_bits_gpa_bits_T_3; // @[PTW.scala:566:{29,47,60}]
wire [25:0] _io_requestor_0_resp_bits_gpa_bits_T_5 = aux_pte_ppn[43:18]; // @[PTW.scala:280:20, :343:49]
wire [25:0] _io_requestor_1_resp_bits_gpa_bits_T_5 = aux_pte_ppn[43:18]; // @[PTW.scala:280:20, :343:49]
wire [17:0] _io_requestor_0_resp_bits_gpa_bits_T_6 = r_req_addr[17:0]; // @[PTW.scala:270:18, :343:79]
wire [17:0] _io_requestor_1_resp_bits_gpa_bits_T_6 = r_req_addr[17:0]; // @[PTW.scala:270:18, :343:79]
wire [17:0] _r_pte_T_18 = r_req_addr[17:0]; // @[PTW.scala:270:18, :343:79]
wire [17:0] _aux_pte_s1_ppns_T_1 = r_req_addr[17:0]; // @[PTW.scala:270:18, :343:79, :744:122]
wire [43:0] _io_requestor_0_resp_bits_gpa_bits_T_7 = {_io_requestor_0_resp_bits_gpa_bits_T_5, _io_requestor_0_resp_bits_gpa_bits_T_6}; // @[PTW.scala:343:{44,49,79}]
wire [34:0] _io_requestor_0_resp_bits_gpa_bits_T_8 = aux_pte_ppn[43:9]; // @[PTW.scala:280:20, :343:49]
wire [34:0] _io_requestor_1_resp_bits_gpa_bits_T_8 = aux_pte_ppn[43:9]; // @[PTW.scala:280:20, :343:49]
wire [8:0] _io_requestor_0_resp_bits_gpa_bits_T_9 = r_req_addr[8:0]; // @[PTW.scala:270:18, :343:79]
wire [8:0] _io_requestor_1_resp_bits_gpa_bits_T_9 = r_req_addr[8:0]; // @[PTW.scala:270:18, :343:79]
wire [8:0] _r_pte_T_21 = r_req_addr[8:0]; // @[PTW.scala:270:18, :343:79]
wire [8:0] _aux_pte_s1_ppns_T_3 = r_req_addr[8:0]; // @[PTW.scala:270:18, :343:79, :744:122]
wire [43:0] _io_requestor_0_resp_bits_gpa_bits_T_10 = {_io_requestor_0_resp_bits_gpa_bits_T_8, _io_requestor_0_resp_bits_gpa_bits_T_9}; // @[PTW.scala:343:{44,49,79}]
wire io_requestor_0_resp_bits_gpa_bits_truncIdx = _io_requestor_0_resp_bits_gpa_bits_truncIdx_T[0]; // @[package.scala:38:{21,47}]
wire _io_requestor_0_resp_bits_gpa_bits_T_11 = io_requestor_0_resp_bits_gpa_bits_truncIdx; // @[package.scala:38:47, :39:86]
wire [43:0] _io_requestor_0_resp_bits_gpa_bits_T_12 = _io_requestor_0_resp_bits_gpa_bits_T_11 ? _io_requestor_0_resp_bits_gpa_bits_T_10 : _io_requestor_0_resp_bits_gpa_bits_T_7; // @[package.scala:39:{76,86}]
wire [43:0] _io_requestor_0_resp_bits_gpa_bits_T_13 = _io_requestor_0_resp_bits_gpa_bits_T_4 ? aux_pte_ppn : _io_requestor_0_resp_bits_gpa_bits_T_12; // @[package.scala:39:76]
wire [55:0] _io_requestor_0_resp_bits_gpa_bits_T_14 = {_io_requestor_0_resp_bits_gpa_bits_T_13, gpa_pgoff}; // @[PTW.scala:281:22, :566:{10,14}]
assign io_requestor_0_resp_bits_gpa_bits_0 = _io_requestor_0_resp_bits_gpa_bits_T_14[38:0]; // @[PTW.scala:219:7, :565:40, :566:10]
assign _io_requestor_0_resp_bits_gpa_is_pte_T = ~stage2_final; // @[PTW.scala:283:25, :357:107, :567:45]
assign io_requestor_0_resp_bits_gpa_is_pte_0 = _io_requestor_0_resp_bits_gpa_is_pte_T; // @[PTW.scala:219:7, :567:45]
assign io_requestor_1_resp_bits_homogeneous_0 = _io_requestor_1_resp_bits_homogeneous_T; // @[PTW.scala:219:7, :562:58]
wire _io_requestor_1_resp_bits_gpa_bits_T = ~stage2_final; // @[PTW.scala:283:25, :357:107, :566:15]
wire _io_requestor_1_resp_bits_gpa_bits_T_1 = ~r_req_vstage1; // @[PTW.scala:270:18, :566:32]
wire _io_requestor_1_resp_bits_gpa_bits_T_2 = _io_requestor_1_resp_bits_gpa_bits_T | _io_requestor_1_resp_bits_gpa_bits_T_1; // @[PTW.scala:566:{15,29,32}]
wire _io_requestor_1_resp_bits_gpa_bits_T_4 = _io_requestor_1_resp_bits_gpa_bits_T_2 | _io_requestor_1_resp_bits_gpa_bits_T_3; // @[PTW.scala:566:{29,47,60}]
wire [43:0] _io_requestor_1_resp_bits_gpa_bits_T_7 = {_io_requestor_1_resp_bits_gpa_bits_T_5, _io_requestor_1_resp_bits_gpa_bits_T_6}; // @[PTW.scala:343:{44,49,79}]
wire [43:0] _io_requestor_1_resp_bits_gpa_bits_T_10 = {_io_requestor_1_resp_bits_gpa_bits_T_8, _io_requestor_1_resp_bits_gpa_bits_T_9}; // @[PTW.scala:343:{44,49,79}]
wire io_requestor_1_resp_bits_gpa_bits_truncIdx = _io_requestor_1_resp_bits_gpa_bits_truncIdx_T[0]; // @[package.scala:38:{21,47}]
wire _io_requestor_1_resp_bits_gpa_bits_T_11 = io_requestor_1_resp_bits_gpa_bits_truncIdx; // @[package.scala:38:47, :39:86]
wire [43:0] _io_requestor_1_resp_bits_gpa_bits_T_12 = _io_requestor_1_resp_bits_gpa_bits_T_11 ? _io_requestor_1_resp_bits_gpa_bits_T_10 : _io_requestor_1_resp_bits_gpa_bits_T_7; // @[package.scala:39:{76,86}]
wire [43:0] _io_requestor_1_resp_bits_gpa_bits_T_13 = _io_requestor_1_resp_bits_gpa_bits_T_4 ? aux_pte_ppn : _io_requestor_1_resp_bits_gpa_bits_T_12; // @[package.scala:39:76]
wire [55:0] _io_requestor_1_resp_bits_gpa_bits_T_14 = {_io_requestor_1_resp_bits_gpa_bits_T_13, gpa_pgoff}; // @[PTW.scala:281:22, :566:{10,14}]
assign io_requestor_1_resp_bits_gpa_bits_0 = _io_requestor_1_resp_bits_gpa_bits_T_14[38:0]; // @[PTW.scala:219:7, :565:40, :566:10]
assign _io_requestor_1_resp_bits_gpa_is_pte_T = ~stage2_final; // @[PTW.scala:283:25, :357:107, :567:45]
assign io_requestor_1_resp_bits_gpa_is_pte_0 = _io_requestor_1_resp_bits_gpa_is_pte_T; // @[PTW.scala:219:7, :567:45]
wire [2:0] next_state; // @[PTW.scala:579:31]
wire do_switch; // @[PTW.scala:581:30]
wire _T_129 = _arb_io_out_ready_T_2 & _arb_io_out_valid; // @[Decoupled.scala:51:35]
wire _GEN_20 = ~(|state) & _T_129; // @[Decoupled.scala:51:35]
wire [43:0] aux_ppn = {17'h0, _arb_io_out_bits_bits_addr}; // @[PTW.scala:236:19, :589:38]
wire [2:0] _next_state_T = {2'h0, _arb_io_out_bits_valid}; // @[PTW.scala:236:19, :593:26]
wire [14:0] resp_gf_idxs_0 = aux_ppn[43:29]; // @[PTW.scala:589:38, :787:58]
wire [14:0] _resp_gf_WIRE_0 = resp_gf_idxs_0; // @[package.scala:43:40]
wire _resp_gf_T_1 = |_resp_gf_WIRE_0; // @[package.scala:43:40]
wire [29:0] _gpa_pgoff_T_1 = {r_req_addr, 3'h0}; // @[PTW.scala:270:18, :615:67]
wire [29:0] _gpa_pgoff_T_2 = _gpa_pgoff_T ? _gpa_pgoff_T_1 : 30'h0; // @[PTW.scala:615:{25,36,67}]
wire [2:0] _aux_count_T_1 = {1'h0, aux_count} + 3'h1; // @[PTW.scala:278:22, :619:32]
wire [1:0] _aux_count_T_2 = _aux_count_T_1[1:0]; // @[PTW.scala:619:32]
wire [2:0] _GEN_21 = {1'h0, count} + 3'h1; // @[PTW.scala:259:18, :624:24]
wire [2:0] _count_T_4; // @[PTW.scala:624:24]
assign _count_T_4 = _GEN_21; // @[PTW.scala:624:24]
wire [2:0] _count_T_6; // @[PTW.scala:696:22]
assign _count_T_6 = _GEN_21; // @[PTW.scala:624:24, :696:22]
wire [2:0] _aux_count_T_3; // @[PTW.scala:741:38]
assign _aux_count_T_3 = _GEN_21; // @[PTW.scala:624:24, :741:38]
wire [1:0] _count_T_5 = _count_T_4[1:0]; // @[PTW.scala:624:24]
wire [2:0] _next_state_T_1 = io_mem_req_ready_0 ? 3'h2 : 3'h1; // @[PTW.scala:219:7, :627:26]
wire _T_140 = state == 3'h2; // @[PTW.scala:233:22, :583:18]
wire _T_141 = state == 3'h4; // @[PTW.scala:233:22, :583:18]
wire _io_dpath_perf_pte_miss_T = ~(count[1]); // @[PTW.scala:259:18, :310:21, :317:73, :640:39]
wire _GEN_22 = _T_152 | _T_140; // @[PTW.scala:377:24, :393:26, :583:18]
assign io_dpath_perf_pte_miss_0 = ~(~(|state) | _GEN_22) & _T_141 & _io_dpath_perf_pte_miss_T; // @[PTW.scala:219:7, :233:22, :240:30, :393:26, :583:18, :640:{30,39}]
wire [1:0] _merged_pte_superpage_mask_T = stage2_final ? max_count : 2'h2; // @[PTW.scala:283:25, :289:25, :662:45]
wire _merged_pte_superpage_mask_T_1 = _merged_pte_superpage_mask_T == 2'h1; // @[package.scala:39:86]
wire [43:0] _merged_pte_superpage_mask_T_2 = _merged_pte_superpage_mask_T_1 ? 44'hFFFFFFFFE00 : 44'hFFFFFFC0000; // @[package.scala:39:{76,86}]
wire _merged_pte_superpage_mask_T_3 = _merged_pte_superpage_mask_T == 2'h2; // @[package.scala:39:86]
wire [43:0] _merged_pte_superpage_mask_T_4 = _merged_pte_superpage_mask_T_3 ? 44'hFFFFFFFFFFF : _merged_pte_superpage_mask_T_2; // @[package.scala:39:{76,86}]
wire _merged_pte_superpage_mask_T_5 = &_merged_pte_superpage_mask_T; // @[package.scala:39:86]
wire [43:0] merged_pte_superpage_mask = _merged_pte_superpage_mask_T_5 ? 44'hFFFFFFFFFFF : _merged_pte_superpage_mask_T_4; // @[package.scala:39:{76,86}]
wire [25:0] _merged_pte_stage1_ppns_T = pte_ppn[43:18]; // @[PTW.scala:305:26, :663:64]
wire [25:0] _aux_pte_s1_ppns_T = pte_ppn[43:18]; // @[PTW.scala:305:26, :663:64, :744:62]
wire [17:0] _merged_pte_stage1_ppns_T_1 = aux_pte_ppn[17:0]; // @[PTW.scala:280:20, :663:125]
wire [43:0] merged_pte_stage1_ppns_0 = {_merged_pte_stage1_ppns_T, _merged_pte_stage1_ppns_T_1}; // @[PTW.scala:663:{56,64,125}]
wire [34:0] _merged_pte_stage1_ppns_T_2 = pte_ppn[43:9]; // @[PTW.scala:305:26, :663:64]
wire [34:0] _aux_pte_s1_ppns_T_2 = pte_ppn[43:9]; // @[PTW.scala:305:26, :663:64, :744:62]
wire [8:0] _merged_pte_stage1_ppns_T_3 = aux_pte_ppn[8:0]; // @[PTW.scala:280:20, :663:125]
wire [43:0] merged_pte_stage1_ppns_1 = {_merged_pte_stage1_ppns_T_2, _merged_pte_stage1_ppns_T_3}; // @[PTW.scala:663:{56,64,125}]
wire [43:0] _merged_pte_stage1_ppn_T_1 = _merged_pte_stage1_ppn_T ? merged_pte_stage1_ppns_1 : merged_pte_stage1_ppns_0; // @[package.scala:39:{76,86}]
wire [43:0] _merged_pte_stage1_ppn_T_3 = _merged_pte_stage1_ppn_T_2 ? pte_ppn : _merged_pte_stage1_ppn_T_1; // @[package.scala:39:{76,86}]
wire _merged_pte_stage1_ppn_T_4 = &count; // @[package.scala:39:86]
wire [43:0] merged_pte_stage1_ppn = _merged_pte_stage1_ppn_T_4 ? pte_ppn : _merged_pte_stage1_ppn_T_3; // @[package.scala:39:{76,86}]
wire [43:0] _merged_pte_T = merged_pte_stage1_ppn & merged_pte_superpage_mask; // @[package.scala:39:76]
wire [43:0] merged_pte_ppn = _merged_pte_T; // @[PTW.scala:665:24, :771:26]
wire _r_pte_T_2 = ~resp_gf; // @[PTW.scala:263:20, :670:32]
wire [43:0] _r_pte_pte_ppn_T_1; // @[PTW.scala:781:19]
wire [43:0] r_pte_pte_ppn; // @[PTW.scala:780:26]
wire [41:0] _r_pte_pte_ppn_T = r_hgatp_ppn[43:2]; // @[PTW.scala:276:20, :781:30]
wire [41:0] _r_pte_pte_ppn_T_2 = r_hgatp_ppn[43:2]; // @[PTW.scala:276:20, :781:30]
assign _r_pte_pte_ppn_T_1 = {_r_pte_pte_ppn_T, 2'h0}; // @[PTW.scala:781:{19,30}]
assign r_pte_pte_ppn = _r_pte_pte_ppn_T_1; // @[PTW.scala:780:26, :781:19]
wire _r_pte_T_7 = _r_pte_T_6 & pte_cache_hit; // @[PTW.scala:367:24, :674:{15,25}]
wire [43:0] r_pte_pte_1_ppn; // @[PTW.scala:771:26]
assign r_pte_pte_1_ppn = {24'h0, pte_cache_data}; // @[Mux.scala:30:73]
wire [16:0] r_pte_idxs_0_1 = pte_ppn[43:27]; // @[PTW.scala:305:26, :778:58]
wire [1:0] r_pte_lsbs_1; // @[PTW.scala:779:27]
assign r_pte_lsbs_1 = r_pte_idxs_0_1[1:0]; // @[PTW.scala:778:58, :779:27]
wire [43:0] _r_pte_pte_ppn_T_3; // @[PTW.scala:781:19]
wire [43:0] r_pte_pte_2_ppn; // @[PTW.scala:780:26]
assign _r_pte_pte_ppn_T_3 = {_r_pte_pte_ppn_T_2, r_pte_lsbs_1}; // @[PTW.scala:779:27, :781:{19,30}]
assign r_pte_pte_2_ppn = _r_pte_pte_ppn_T_3; // @[PTW.scala:780:26, :781:19]
wire _r_pte_T_8 = ~traverse; // @[PTW.scala:317:64, :678:29]
wire _r_pte_T_9 = _r_pte_T_8 & r_req_vstage1; // @[PTW.scala:270:18, :678:{29,39}]
wire _r_pte_T_10 = _r_pte_T_9 & stage2; // @[PTW.scala:282:19, :678:{39,56}]
wire [9:0] _r_pte_T_11_reserved_for_future = _r_pte_T_10 ? merged_pte_reserved_for_future : pte_reserved_for_future; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire [43:0] _r_pte_T_11_ppn = _r_pte_T_10 ? merged_pte_ppn : pte_ppn; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire [1:0] _r_pte_T_11_reserved_for_software = _r_pte_T_10 ? merged_pte_reserved_for_software : pte_reserved_for_software; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_d = _r_pte_T_10 ? merged_pte_d : pte_d; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_a = _r_pte_T_10 ? merged_pte_a : pte_a; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_g = _r_pte_T_10 ? merged_pte_g : pte_g; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_u = _r_pte_T_10 ? merged_pte_u : pte_u; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_x = _r_pte_T_10 ? merged_pte_x : pte_x; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_w = _r_pte_T_10 ? merged_pte_w : pte_w; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_r = _r_pte_T_10 ? merged_pte_r : pte_r; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_11_v = _r_pte_T_10 ? merged_pte_v : pte_v; // @[PTW.scala:305:26, :678:{28,56}, :771:26]
wire _r_pte_T_12 = &state; // @[PTW.scala:233:22, :680:15]
wire _r_pte_T_13 = ~homogeneous; // @[PTW.scala:549:36, :680:43]
wire _r_pte_T_14 = _r_pte_T_12 & _r_pte_T_13; // @[PTW.scala:680:{15,40,43}]
wire _r_pte_T_15 = count != 2'h2; // @[PTW.scala:259:18, :680:65]
wire _r_pte_T_16 = _r_pte_T_14 & _r_pte_T_15; // @[PTW.scala:680:{40,56,65}]
wire [25:0] _r_pte_T_17 = r_pte_ppn[43:18]; // @[PTW.scala:275:18, :343:49]
wire [43:0] _r_pte_T_19 = {_r_pte_T_17, _r_pte_T_18}; // @[PTW.scala:343:{44,49,79}]
wire [34:0] _r_pte_T_20 = r_pte_ppn[43:9]; // @[PTW.scala:275:18, :343:49]
wire [43:0] _r_pte_T_22 = {_r_pte_T_20, _r_pte_T_21}; // @[PTW.scala:343:{44,49,79}]
wire r_pte_truncIdx = _r_pte_truncIdx_T[0]; // @[package.scala:38:{21,47}]
wire _r_pte_T_23 = r_pte_truncIdx; // @[package.scala:38:47, :39:86]
wire [43:0] _r_pte_T_24 = _r_pte_T_23 ? _r_pte_T_22 : _r_pte_T_19; // @[package.scala:39:{76,86}]
wire [43:0] r_pte_pte_3_ppn = _r_pte_T_24; // @[package.scala:39:76]
wire _r_pte_T_25 = _arb_io_out_ready_T_2 & _arb_io_out_valid; // @[Decoupled.scala:51:35]
wire [9:0] _r_pte_T_26_reserved_for_future = r_pte_pte_5_reserved_for_future; // @[PTW.scala:682:29, :771:26]
wire [43:0] _r_pte_T_26_ppn = r_pte_pte_5_ppn; // @[PTW.scala:682:29, :771:26]
wire [1:0] _r_pte_T_26_reserved_for_software = r_pte_pte_5_reserved_for_software; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_d = r_pte_pte_5_d; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_a = r_pte_pte_5_a; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_g = r_pte_pte_5_g; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_u = r_pte_pte_5_u; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_x = r_pte_pte_5_x; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_w = r_pte_pte_5_w; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_r = r_pte_pte_5_r; // @[PTW.scala:682:29, :771:26]
wire _r_pte_T_26_v = r_pte_pte_5_v; // @[PTW.scala:682:29, :771:26]
wire [9:0] _r_pte_T_27_reserved_for_future = _r_pte_T_25 ? _r_pte_T_26_reserved_for_future : r_pte_reserved_for_future; // @[Decoupled.scala:51:35]
wire [43:0] _r_pte_T_27_ppn = _r_pte_T_25 ? _r_pte_T_26_ppn : r_pte_ppn; // @[Decoupled.scala:51:35]
wire [1:0] _r_pte_T_27_reserved_for_software = _r_pte_T_25 ? _r_pte_T_26_reserved_for_software : r_pte_reserved_for_software; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_d = _r_pte_T_25 ? _r_pte_T_26_d : r_pte_d; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_a = _r_pte_T_25 ? _r_pte_T_26_a : r_pte_a; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_g = _r_pte_T_25 ? _r_pte_T_26_g : r_pte_g; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_u = _r_pte_T_25 ? _r_pte_T_26_u : r_pte_u; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_x = _r_pte_T_25 ? _r_pte_T_26_x : r_pte_x; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_w = _r_pte_T_25 ? _r_pte_T_26_w : r_pte_w; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_r = _r_pte_T_25 ? _r_pte_T_26_r : r_pte_r; // @[Decoupled.scala:51:35]
wire _r_pte_T_27_v = _r_pte_T_25 ? _r_pte_T_26_v : r_pte_v; // @[Decoupled.scala:51:35]
wire [9:0] _r_pte_T_28_reserved_for_future = _r_pte_T_16 ? r_pte_pte_3_reserved_for_future : _r_pte_T_27_reserved_for_future; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire [43:0] _r_pte_T_28_ppn = _r_pte_T_16 ? r_pte_pte_3_ppn : _r_pte_T_27_ppn; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire [1:0] _r_pte_T_28_reserved_for_software = _r_pte_T_16 ? r_pte_pte_3_reserved_for_software : _r_pte_T_27_reserved_for_software; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_d = _r_pte_T_16 ? r_pte_pte_3_d : _r_pte_T_27_d; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_a = _r_pte_T_16 ? r_pte_pte_3_a : _r_pte_T_27_a; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_g = _r_pte_T_16 ? r_pte_pte_3_g : _r_pte_T_27_g; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_u = _r_pte_T_16 ? r_pte_pte_3_u : _r_pte_T_27_u; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_x = _r_pte_T_16 ? r_pte_pte_3_x : _r_pte_T_27_x; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_w = _r_pte_T_16 ? r_pte_pte_3_w : _r_pte_T_27_w; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_r = _r_pte_T_16 ? r_pte_pte_3_r : _r_pte_T_27_r; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire _r_pte_T_28_v = _r_pte_T_16 ? r_pte_pte_3_v : _r_pte_T_27_v; // @[PTW.scala:680:{8,56}, :682:8, :771:26]
wire [9:0] _r_pte_T_29_reserved_for_future = mem_resp_valid ? _r_pte_T_11_reserved_for_future : _r_pte_T_28_reserved_for_future; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire [43:0] _r_pte_T_29_ppn = mem_resp_valid ? _r_pte_T_11_ppn : _r_pte_T_28_ppn; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire [1:0] _r_pte_T_29_reserved_for_software = mem_resp_valid ? _r_pte_T_11_reserved_for_software : _r_pte_T_28_reserved_for_software; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_d = mem_resp_valid ? _r_pte_T_11_d : _r_pte_T_28_d; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_a = mem_resp_valid ? _r_pte_T_11_a : _r_pte_T_28_a; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_g = mem_resp_valid ? _r_pte_T_11_g : _r_pte_T_28_g; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_u = mem_resp_valid ? _r_pte_T_11_u : _r_pte_T_28_u; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_x = mem_resp_valid ? _r_pte_T_11_x : _r_pte_T_28_x; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_w = mem_resp_valid ? _r_pte_T_11_w : _r_pte_T_28_w; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_r = mem_resp_valid ? _r_pte_T_11_r : _r_pte_T_28_r; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire _r_pte_T_29_v = mem_resp_valid ? _r_pte_T_11_v : _r_pte_T_28_v; // @[PTW.scala:292:31, :678:{8,28}, :680:8]
wire [9:0] _r_pte_T_30_reserved_for_future = do_switch ? r_pte_pte_2_reserved_for_future : _r_pte_T_29_reserved_for_future; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire [43:0] _r_pte_T_30_ppn = do_switch ? r_pte_pte_2_ppn : _r_pte_T_29_ppn; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire [1:0] _r_pte_T_30_reserved_for_software = do_switch ? r_pte_pte_2_reserved_for_software : _r_pte_T_29_reserved_for_software; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_d = do_switch ? r_pte_pte_2_d : _r_pte_T_29_d; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_a = do_switch ? r_pte_pte_2_a : _r_pte_T_29_a; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_g = do_switch ? r_pte_pte_2_g : _r_pte_T_29_g; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_u = do_switch ? r_pte_pte_2_u : _r_pte_T_29_u; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_x = do_switch ? r_pte_pte_2_x : _r_pte_T_29_x; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_w = do_switch ? r_pte_pte_2_w : _r_pte_T_29_w; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_r = do_switch ? r_pte_pte_2_r : _r_pte_T_29_r; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire _r_pte_T_30_v = do_switch ? r_pte_pte_2_v : _r_pte_T_29_v; // @[PTW.scala:581:30, :676:8, :678:8, :780:26]
wire [9:0] _r_pte_T_31_reserved_for_future = _r_pte_T_7 ? 10'h0 : _r_pte_T_30_reserved_for_future; // @[PTW.scala:674:{8,25}, :676:8]
wire [43:0] _r_pte_T_31_ppn = _r_pte_T_7 ? r_pte_pte_1_ppn : _r_pte_T_30_ppn; // @[PTW.scala:674:{8,25}, :676:8, :771:26]
wire [1:0] _r_pte_T_31_reserved_for_software = _r_pte_T_7 ? 2'h0 : _r_pte_T_30_reserved_for_software; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_d = ~_r_pte_T_7 & _r_pte_T_30_d; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_a = ~_r_pte_T_7 & _r_pte_T_30_a; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_g = ~_r_pte_T_7 & _r_pte_T_30_g; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_u = ~_r_pte_T_7 & _r_pte_T_30_u; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_x = ~_r_pte_T_7 & _r_pte_T_30_x; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_w = ~_r_pte_T_7 & _r_pte_T_30_w; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_r = ~_r_pte_T_7 & _r_pte_T_30_r; // @[PTW.scala:674:{8,25}, :676:8]
wire _r_pte_T_31_v = ~_r_pte_T_7 & _r_pte_T_30_v; // @[PTW.scala:674:{8,25}, :676:8]
wire [9:0] _r_pte_T_32_reserved_for_future = _r_pte_T_31_reserved_for_future; // @[PTW.scala:672:8, :674:8]
wire [43:0] _r_pte_T_32_ppn = _r_pte_T_31_ppn; // @[PTW.scala:672:8, :674:8]
wire [1:0] _r_pte_T_32_reserved_for_software = _r_pte_T_31_reserved_for_software; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_d = _r_pte_T_31_d; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_a = _r_pte_T_31_a; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_g = _r_pte_T_31_g; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_u = _r_pte_T_31_u; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_x = _r_pte_T_31_x; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_w = _r_pte_T_31_w; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_r = _r_pte_T_31_r; // @[PTW.scala:672:8, :674:8]
wire _r_pte_T_32_v = _r_pte_T_31_v; // @[PTW.scala:672:8, :674:8]
wire [9:0] _r_pte_T_33_reserved_for_future = _r_pte_T_32_reserved_for_future; // @[PTW.scala:670:8, :672:8]
wire [43:0] _r_pte_T_33_ppn = _r_pte_T_32_ppn; // @[PTW.scala:670:8, :672:8]
wire [1:0] _r_pte_T_33_reserved_for_software = _r_pte_T_32_reserved_for_software; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_d = _r_pte_T_32_d; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_a = _r_pte_T_32_a; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_g = _r_pte_T_32_g; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_u = _r_pte_T_32_u; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_x = _r_pte_T_32_x; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_w = _r_pte_T_32_w; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_r = _r_pte_T_32_r; // @[PTW.scala:670:8, :672:8]
wire _r_pte_T_33_v = _r_pte_T_32_v; // @[PTW.scala:670:8, :672:8]
wire [1:0] _count_T_7 = _count_T_6[1:0]; // @[PTW.scala:696:22]
wire _gf_T = ~stage2_final; // @[PTW.scala:283:25, :357:107, :698:27]
wire _gf_T_1 = stage2 & _gf_T; // @[PTW.scala:282:19, :698:{24,27}]
wire _gf_T_2 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _gf_T_3 = pte_x & _gf_T_2; // @[PTW.scala:141:{44,47}, :305:26]
wire _gf_T_4 = pte_r | _gf_T_3; // @[PTW.scala:141:{38,44}, :305:26]
wire _gf_T_5 = pte_v & _gf_T_4; // @[PTW.scala:141:{32,38}, :305:26]
wire _gf_T_6 = _gf_T_5 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _gf_T_7 = _gf_T_6 & pte_r; // @[PTW.scala:141:52, :149:35, :305:26]
wire _gf_T_8 = _gf_T_7 & pte_u; // @[PTW.scala:143:33, :149:35, :305:26]
wire _gf_T_9 = ~_gf_T_8; // @[PTW.scala:143:33, :698:44]
wire _gf_T_10 = _gf_T_1 & _gf_T_9; // @[PTW.scala:698:{24,41,44}]
wire _gf_T_11 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _gf_T_12 = pte_x & _gf_T_11; // @[PTW.scala:141:{44,47}, :305:26]
wire _gf_T_13 = pte_r | _gf_T_12; // @[PTW.scala:141:{38,44}, :305:26]
wire _gf_T_14 = pte_v & _gf_T_13; // @[PTW.scala:141:{32,38}, :305:26]
wire _gf_T_15 = _gf_T_14 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _gf_T_16 = ~(|pte_reserved_for_future); // @[PTW.scala:139:92, :305:26, :698:97]
wire _gf_T_17 = _gf_T_15 & _gf_T_16; // @[PTW.scala:141:52, :698:{70,97}]
wire _gf_T_18 = _gf_T_17 & invalid_gpa; // @[PTW.scala:314:32, :698:{70,105}]
wire gf = _gf_T_10 | _gf_T_18; // @[PTW.scala:698:{41,55,105}]
wire ae = pte_v & invalid_paddr; // @[PTW.scala:305:26, :313:9, :699:22]
wire _pf_T = |pte_reserved_for_future; // @[PTW.scala:139:92, :305:26, :700:49]
wire pf = pte_v & _pf_T; // @[PTW.scala:305:26, :700:{22,49}]
wire _success_T = ~ae; // @[PTW.scala:699:22, :701:30]
wire _success_T_1 = pte_v & _success_T; // @[PTW.scala:305:26, :701:{27,30}]
wire _success_T_2 = ~pf; // @[PTW.scala:700:22, :701:37]
wire _success_T_3 = _success_T_1 & _success_T_2; // @[PTW.scala:701:{27,34,37}]
wire _success_T_4 = ~gf; // @[PTW.scala:698:55, :701:44]
wire success = _success_T_3 & _success_T_4; // @[PTW.scala:701:{34,41,44}]
wire _T_168 = do_both_stages & ~stage2_final & success; // @[PTW.scala:283:25, :288:38, :357:107, :701:41, :703:{28,45}]
assign do_switch = mem_resp_valid & (traverse ? do_both_stages & ~stage2 : _T_168 & ~stage2); // @[PTW.scala:282:19, :288:38, :292:31, :306:38, :317:64, :581:30, :691:25, :694:21, :695:{28,40}, :703:{28,45,57}, :704:23, :709:21]
wire _l2_refill_T_1 = success & _l2_refill_T; // @[PTW.scala:701:41, :713:{30,39}]
wire _l2_refill_T_2 = ~r_req_need_gpa; // @[PTW.scala:270:18, :713:61]
wire _l2_refill_T_3 = _l2_refill_T_1 & _l2_refill_T_2; // @[PTW.scala:713:{30,58,61}]
wire _l2_refill_T_4 = ~r_req_vstage1; // @[PTW.scala:270:18, :566:32, :714:12]
wire _l2_refill_T_5 = ~r_req_stage2; // @[PTW.scala:270:18, :358:65, :714:30]
wire _l2_refill_T_6 = _l2_refill_T_4 & _l2_refill_T_5; // @[PTW.scala:714:{12,27,30}]
wire _l2_refill_T_8 = do_both_stages & _l2_refill_T_7; // @[PTW.scala:288:38, :715:{27,40}]
wire _l2_refill_T_9 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _l2_refill_T_10 = pte_x & _l2_refill_T_9; // @[PTW.scala:141:{44,47}, :305:26]
wire _l2_refill_T_11 = pte_r | _l2_refill_T_10; // @[PTW.scala:141:{38,44}, :305:26]
wire _l2_refill_T_12 = pte_v & _l2_refill_T_11; // @[PTW.scala:141:{32,38}, :305:26]
wire _l2_refill_T_13 = _l2_refill_T_12 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _l2_refill_T_14 = _l2_refill_T_13 & pte_w; // @[PTW.scala:141:52, :151:35, :305:26]
wire _l2_refill_T_15 = _l2_refill_T_14 & pte_d; // @[PTW.scala:151:{35,40}, :305:26]
wire _l2_refill_T_16 = _l2_refill_T_15 & pte_u; // @[PTW.scala:145:33, :151:40, :305:26]
wire _l2_refill_T_17 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _l2_refill_T_18 = pte_x & _l2_refill_T_17; // @[PTW.scala:141:{44,47}, :305:26]
wire _l2_refill_T_19 = pte_r | _l2_refill_T_18; // @[PTW.scala:141:{38,44}, :305:26]
wire _l2_refill_T_20 = pte_v & _l2_refill_T_19; // @[PTW.scala:141:{32,38}, :305:26]
wire _l2_refill_T_21 = _l2_refill_T_20 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _l2_refill_T_22 = _l2_refill_T_21 & pte_x; // @[PTW.scala:141:52, :153:35, :305:26]
wire _l2_refill_T_23 = _l2_refill_T_22 & pte_u; // @[PTW.scala:147:33, :153:35, :305:26]
wire _l2_refill_T_24 = _l2_refill_T_16 & _l2_refill_T_23; // @[PTW.scala:145:33, :147:33, :155:41]
wire _l2_refill_T_25 = _l2_refill_T_8 & _l2_refill_T_24; // @[PTW.scala:155:41, :715:{27,59}]
wire _l2_refill_T_26 = _l2_refill_T_6 | _l2_refill_T_25; // @[PTW.scala:714:{27,44}, :715:59]
wire _l2_refill_T_27 = _l2_refill_T_3 & _l2_refill_T_26; // @[PTW.scala:713:{58,77}, :714:44]
wire _GEN_23 = traverse | _T_168; // @[PTW.scala:317:64, :398:26, :694:21, :703:{28,45,57}, :713:19]
wire _resp_ae_ptw_T = ~(count[1]); // @[PTW.scala:259:18, :310:21, :317:73, :725:36]
wire _resp_ae_ptw_T_1 = ae & _resp_ae_ptw_T; // @[PTW.scala:699:22, :725:{27,36}]
wire _resp_ae_ptw_T_2 = ~pte_r; // @[PTW.scala:139:36, :305:26]
wire _resp_ae_ptw_T_3 = pte_v & _resp_ae_ptw_T_2; // @[PTW.scala:139:{33,36}, :305:26]
wire _resp_ae_ptw_T_4 = ~pte_w; // @[PTW.scala:139:42, :305:26]
wire _resp_ae_ptw_T_5 = _resp_ae_ptw_T_3 & _resp_ae_ptw_T_4; // @[PTW.scala:139:{33,39,42}]
wire _resp_ae_ptw_T_6 = ~pte_x; // @[PTW.scala:139:48, :305:26]
wire _resp_ae_ptw_T_7 = _resp_ae_ptw_T_5 & _resp_ae_ptw_T_6; // @[PTW.scala:139:{39,45,48}]
wire _resp_ae_ptw_T_8 = ~pte_d; // @[PTW.scala:139:54, :305:26]
wire _resp_ae_ptw_T_9 = _resp_ae_ptw_T_7 & _resp_ae_ptw_T_8; // @[PTW.scala:139:{45,51,54}]
wire _resp_ae_ptw_T_10 = ~pte_a; // @[PTW.scala:139:60, :305:26]
wire _resp_ae_ptw_T_11 = _resp_ae_ptw_T_9 & _resp_ae_ptw_T_10; // @[PTW.scala:139:{51,57,60}]
wire _resp_ae_ptw_T_12 = ~pte_u; // @[PTW.scala:139:66, :305:26]
wire _resp_ae_ptw_T_13 = _resp_ae_ptw_T_11 & _resp_ae_ptw_T_12; // @[PTW.scala:139:{57,63,66}]
wire _resp_ae_ptw_T_14 = ~(|pte_reserved_for_future); // @[PTW.scala:139:92, :305:26]
wire _resp_ae_ptw_T_15 = _resp_ae_ptw_T_13 & _resp_ae_ptw_T_14; // @[PTW.scala:139:{63,69,92}]
wire _resp_ae_ptw_T_16 = _resp_ae_ptw_T_1 & _resp_ae_ptw_T_15; // @[PTW.scala:139:69, :725:{27,53}]
wire _resp_ae_final_T = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _resp_ae_final_T_1 = pte_x & _resp_ae_final_T; // @[PTW.scala:141:{44,47}, :305:26]
wire _resp_ae_final_T_2 = pte_r | _resp_ae_final_T_1; // @[PTW.scala:141:{38,44}, :305:26]
wire _resp_ae_final_T_3 = pte_v & _resp_ae_final_T_2; // @[PTW.scala:141:{32,38}, :305:26]
wire _resp_ae_final_T_4 = _resp_ae_final_T_3 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _resp_ae_final_T_5 = ae & _resp_ae_final_T_4; // @[PTW.scala:141:52, :699:22, :726:29]
wire _resp_pf_T = ~stage2; // @[PTW.scala:282:19, :306:38, :727:26]
wire _resp_pf_T_1 = pf & _resp_pf_T; // @[PTW.scala:700:22, :727:{23,26}]
wire _resp_gf_T_3 = pf & stage2; // @[PTW.scala:282:19, :700:22, :728:30]
wire _resp_gf_T_4 = gf | _resp_gf_T_3; // @[PTW.scala:698:55, :728:{23,30}]
wire _resp_hr_T = ~stage2; // @[PTW.scala:282:19, :306:38, :729:20]
wire _resp_hr_T_1 = ~pf; // @[PTW.scala:700:22, :701:37, :729:32]
wire _resp_hr_T_2 = ~gf; // @[PTW.scala:698:55, :701:44, :729:39]
wire _resp_hr_T_3 = _resp_hr_T_1 & _resp_hr_T_2; // @[PTW.scala:729:{32,36,39}]
wire _resp_hr_T_4 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _resp_hr_T_5 = pte_x & _resp_hr_T_4; // @[PTW.scala:141:{44,47}, :305:26]
wire _resp_hr_T_6 = pte_r | _resp_hr_T_5; // @[PTW.scala:141:{38,44}, :305:26]
wire _resp_hr_T_7 = pte_v & _resp_hr_T_6; // @[PTW.scala:141:{32,38}, :305:26]
wire _resp_hr_T_8 = _resp_hr_T_7 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _resp_hr_T_9 = _resp_hr_T_8 & pte_r; // @[PTW.scala:141:52, :149:35, :305:26]
wire _resp_hr_T_10 = _resp_hr_T_9 & pte_u; // @[PTW.scala:143:33, :149:35, :305:26]
wire _resp_hr_T_11 = _resp_hr_T_3 & _resp_hr_T_10; // @[PTW.scala:143:33, :729:{36,43}]
wire _resp_hr_T_12 = _resp_hr_T | _resp_hr_T_11; // @[PTW.scala:729:{20,28,43}]
wire _resp_hw_T = ~stage2; // @[PTW.scala:282:19, :306:38, :730:20]
wire _resp_hw_T_1 = ~pf; // @[PTW.scala:700:22, :701:37, :730:32]
wire _resp_hw_T_2 = ~gf; // @[PTW.scala:698:55, :701:44, :730:39]
wire _resp_hw_T_3 = _resp_hw_T_1 & _resp_hw_T_2; // @[PTW.scala:730:{32,36,39}]
wire _resp_hw_T_4 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _resp_hw_T_5 = pte_x & _resp_hw_T_4; // @[PTW.scala:141:{44,47}, :305:26]
wire _resp_hw_T_6 = pte_r | _resp_hw_T_5; // @[PTW.scala:141:{38,44}, :305:26]
wire _resp_hw_T_7 = pte_v & _resp_hw_T_6; // @[PTW.scala:141:{32,38}, :305:26]
wire _resp_hw_T_8 = _resp_hw_T_7 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _resp_hw_T_9 = _resp_hw_T_8 & pte_w; // @[PTW.scala:141:52, :151:35, :305:26]
wire _resp_hw_T_10 = _resp_hw_T_9 & pte_d; // @[PTW.scala:151:{35,40}, :305:26]
wire _resp_hw_T_11 = _resp_hw_T_10 & pte_u; // @[PTW.scala:145:33, :151:40, :305:26]
wire _resp_hw_T_12 = _resp_hw_T_3 & _resp_hw_T_11; // @[PTW.scala:145:33, :730:{36,43}]
wire _resp_hw_T_13 = _resp_hw_T | _resp_hw_T_12; // @[PTW.scala:730:{20,28,43}]
wire _resp_hx_T = ~stage2; // @[PTW.scala:282:19, :306:38, :731:20]
wire _resp_hx_T_1 = ~pf; // @[PTW.scala:700:22, :701:37, :731:32]
wire _resp_hx_T_2 = ~gf; // @[PTW.scala:698:55, :701:44, :731:39]
wire _resp_hx_T_3 = _resp_hx_T_1 & _resp_hx_T_2; // @[PTW.scala:731:{32,36,39}]
wire _resp_hx_T_4 = ~pte_w; // @[PTW.scala:139:42, :141:47, :305:26]
wire _resp_hx_T_5 = pte_x & _resp_hx_T_4; // @[PTW.scala:141:{44,47}, :305:26]
wire _resp_hx_T_6 = pte_r | _resp_hx_T_5; // @[PTW.scala:141:{38,44}, :305:26]
wire _resp_hx_T_7 = pte_v & _resp_hx_T_6; // @[PTW.scala:141:{32,38}, :305:26]
wire _resp_hx_T_8 = _resp_hx_T_7 & pte_a; // @[PTW.scala:141:{32,52}, :305:26]
wire _resp_hx_T_9 = _resp_hx_T_8 & pte_x; // @[PTW.scala:141:52, :153:35, :305:26]
wire _resp_hx_T_10 = _resp_hx_T_9 & pte_u; // @[PTW.scala:147:33, :153:35, :305:26]
wire _resp_hx_T_11 = _resp_hx_T_3 & _resp_hx_T_10; // @[PTW.scala:147:33, :731:{36,43}]
wire _resp_hx_T_12 = _resp_hx_T | _resp_hx_T_11; // @[PTW.scala:731:{20,28,43}] |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
* a, b, d, and the control signals are broadcast across the entire array and are passed through to the Tile's outputs
* @param inputType The data type of each PE's 'a' input
* @param outputType The data type of the 'b' and 'd' inputs and of the Tile's outputs
* @param accType The data type used for accumulation inside each PE
* @param rows Number of rows of PEs in the Tile
* @param columns Number of columns of PEs in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
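// Each broadcast below uses foldLeft to thread a signal through a row or column of PEs:
// the fold's accumulator starts at the Tile's input, drives the current PE's input, and the
// PE's corresponding output becomes the accumulator that feeds the next PE in the chain.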
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
  // Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_21( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0 // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
PE_277 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule |
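The Tile above wires every broadcast signal through its PEs with the same foldLeft idiom (note the TODO about abstracting the horizontal/vertical broadcasts). Below is a minimal sketch of how that chaining could be factored out; the helper name chainThrough is hypothetical and this is not gemmini code.
// Editorial sketch, not part of gemmini: the generic shape of the foldLeft
// pass-through used for every broadcast in Tile.scala. `connect` drives one
// element's input from the running value and returns that element's output,
// which becomes the value seen by the next element in the chain.
def chainThrough[S, P](first: S, elems: Seq[P])(connect: (S, P) => S): S =
  elems.foldLeft(first) { case (sig, e) => connect(sig, e) }
// Usage analogous to the 'a' broadcast above (hypothetical):
//   chainThrough(io.in_a(r), tile(r)) { (in_a, pe) => pe.io.in_a := in_a; pe.io.out_a }
The fold's final result is unused for the broadcasts themselves; the Tile taps its outputs directly from the last row and column of PEs instead.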
Generate the Verilog code corresponding to the following Chisel files.
File EarlyTrapCheck.scala:
package saturn.frontend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.diplomacy._
import saturn.common._
import saturn.backend.{VectorBackend}
class EarlyTrapCheck(edge: TLEdge, sgSize: Option[BigInt])(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val unified_addresses = AddressSet.unify(edge.manager.managers.map(_.address).flatten)
require(unified_addresses.forall(_.alignment >= (1 << pgIdxBits)),
"Memory devices on this system must be at least page-aligned")
val io = IO(new Bundle {
val sg_base = Input(UInt(coreMaxAddrBits.W))
val busy = Output(Bool())
val s0 = new Bundle {
val in = Input(Valid(new Bundle {
val inst = UInt(32.W)
val pc = UInt(vaddrBitsExtended.W)
val status = new MStatus
val vconfig = new VConfig
val vstart = UInt(log2Ceil(maxVLMax).W)
val rs1 = UInt(xLen.W)
val rs2 = UInt(xLen.W)
val phys = Bool()
}))
val tlb_req = Valid(new TLBReq(3))
}
val s1 = new Bundle {
val inst = Output(new VectorIssueInst)
val rs1 = Input(Valid(UInt(xLen.W)))
val kill = Input(Bool())
val tlb_req = Valid(new TLBReq(3))
val tlb_resp = Input(new TLBResp)
}
val s2 = new Bundle {
val scalar_store_pending = Input(Bool())
val inst = Valid(new VectorIssueInst)
val replay = Output(Bool())
val vstart = Valid(UInt(log2Ceil(maxVLMax).W))
val retire = Output(Bool())
val xcpt = Valid(new Bundle {
val cause = UInt(xLen.W)
val tval = UInt(coreMaxAddrBits.W)
})
val pc = Output(UInt(vaddrBitsExtended.W))
val internal_replay = Valid(new VectorIssueInst)
val issue = Decoupled(new VectorIssueInst)
val vxrm = Input(UInt(2.W))
val frm = Input(UInt(3.W))
}
})
val s1_valid = RegInit(false.B)
val s2_valid = RegInit(false.B)
io.busy := s1_valid || s2_valid
val s0_inst = Wire(new VectorIssueInst)
s0_inst.pc := io.s0.in.bits.pc
s0_inst.bits := io.s0.in.bits.inst
s0_inst.vconfig := io.s0.in.bits.vconfig
s0_inst.vstart := Mux(s1_valid || s2_valid, 0.U, io.s0.in.bits.vstart)
s0_inst.segstart := 0.U
s0_inst.segend := s0_inst.seg_nf
s0_inst.rs1_data := io.s0.in.bits.rs1
s0_inst.rs2_data := io.s0.in.bits.rs2
s0_inst.emul := Mux(io.s0.in.bits.vconfig.vtype.vlmul_sign, 0.U, io.s0.in.bits.vconfig.vtype.vlmul_mag)
s0_inst.page := DontCare
s0_inst.vat := DontCare
s0_inst.debug_id := DontCare
s0_inst.rm := DontCare
s0_inst.fast_sg := false.B
s0_inst.mop := s0_inst.orig_mop
when (s0_inst.vmu && s0_inst.mop === mopUnit) {
val mask_vl = (io.s0.in.bits.vconfig.vl >> 3) + Mux(io.s0.in.bits.vconfig.vl(2,0) === 0.U, 0.U, 1.U)
val whole_vl = (vLen.U >> (s0_inst.mem_elem_size +& 3.U)) * (s0_inst.nf +& 1.U)
s0_inst.vconfig.vl := MuxLookup(s0_inst.umop, io.s0.in.bits.vconfig.vl)(Seq(
(lumopWhole -> whole_vl), (lumopMask -> mask_vl)
))
when (s0_inst.umop === lumopWhole) {
s0_inst.emul := VecInit.tabulate(8)(nf => log2Ceil(nf+1).U)(s0_inst.nf)
}
}
when (!s0_inst.vmu && s0_inst.funct3 === OPIVI && s0_inst.funct6 === OPIFunct6.mvnrr.litValue.U) {
s0_inst.emul := log2_up(s0_inst.imm5, 8)
}
val s0_unit = s0_inst.mop === mopUnit || (s0_inst.mop === mopStrided && io.s0.in.bits.rs2 === ((s0_inst.nf +& 1.U) << s0_inst.mem_elem_size))
val s0_indexed = s0_inst.mop.isOneOf(mopOrdered, mopUnordered)
val s0_base = io.s0.in.bits.rs1 + (((s0_inst.seg_nf +& 1.U) * s0_inst.vstart ) << s0_inst.mem_elem_size)
val s0_bound = io.s0.in.bits.rs1 + (((s0_inst.seg_nf +& 1.U) * s0_inst.vconfig.vl) << s0_inst.mem_elem_size) - 1.U
val s0_single_page = (s0_base >> pgIdxBits) === (s0_bound >> pgIdxBits)
val s0_replay_next_page = s0_inst.vmu && s0_unit && s0_inst.nf === 0.U && !s0_single_page
val s0_iterative = (!s0_single_page || !s0_unit || s0_inst.umop === lumopFF) && !s0_replay_next_page
val s0_fast_sg = s0_iterative && io.s0.in.bits.phys && s0_inst.mop === mopUnordered && s0_inst.seg_nf === 0.U && sgSize.map { size =>
s0_base >= io.sg_base && s0_base < (io.sg_base + size.U)
}.getOrElse(false.B)
val s0_tlb_valid = !s0_iterative && s0_inst.vmu && s0_inst.vstart < s0_inst.vconfig.vl
io.s0.tlb_req.valid := s0_tlb_valid && io.s0.in.valid
io.s0.tlb_req.bits.vaddr := s0_base
io.s0.tlb_req.bits.passthrough := false.B
io.s0.tlb_req.bits.size := s0_inst.mem_elem_size
io.s0.tlb_req.bits.cmd := Mux(s0_inst.opcode(5), M_XWR, M_XRD)
io.s0.tlb_req.bits.prv := io.s0.in.bits.status.prv
io.s0.tlb_req.bits.v := io.s0.in.bits.status.v
// s1_stage
s1_valid := io.s0.in.fire
val s1_inst = RegEnable(s0_inst , io.s0.in.valid)
val s1_iterative = RegEnable(s0_iterative , io.s0.in.valid)
val s1_replay_next_page = RegEnable(s0_replay_next_page, io.s0.in.valid)
val s1_base = RegEnable(s0_base , io.s0.in.valid)
val s1_tlb_valid = RegEnable(s0_tlb_valid , io.s0.in.valid)
val s1_fast_sg = RegEnable(s0_fast_sg , io.s0.in.valid)
val s1_tlb_resp = WireInit(io.s1.tlb_resp)
when (!s1_tlb_valid) {
s1_tlb_resp := 0.U.asTypeOf(new TLBResp)
when (s1_fast_sg) {
s1_tlb_resp.paddr := s1_base
}
}
io.s1.inst := s1_inst
io.s1.tlb_req.valid := RegNext(io.s0.tlb_req.valid, false.B)
io.s1.tlb_req.bits := RegEnable(io.s0.tlb_req.bits, s0_tlb_valid)
// s2 stage
s2_valid := s1_valid && !io.s1.kill
val s2_inst = Reg(new VectorIssueInst)
val s2_base = RegEnable(s1_base, s1_valid)
val s2_iterative = RegEnable(s1_iterative , s1_valid)
val s2_fast_sg = RegEnable(s1_fast_sg , s1_valid)
val s2_replay_next_page = RegEnable(s1_replay_next_page, s1_valid)
when (s1_valid) {
s2_inst := s1_inst
when (io.s1.rs1.valid) { s2_inst.rs1_data := io.s1.rs1.bits }
}
val s2_tlb_resp = RegEnable(s1_tlb_resp, s1_valid)
val s2_xcpts = Seq(
(s2_tlb_resp.pf.st, Causes.store_page_fault.U),
(s2_tlb_resp.pf.ld, Causes.load_page_fault.U),
(s2_tlb_resp.gf.st, Causes.store_guest_page_fault.U),
(s2_tlb_resp.gf.ld, Causes.load_guest_page_fault.U),
(s2_tlb_resp.ae.st, Causes.store_access.U),
(s2_tlb_resp.ae.ld, Causes.load_access.U),
(s2_tlb_resp.ma.st, Causes.misaligned_store.U),
(s2_tlb_resp.ma.ld, Causes.misaligned_load.U)
)
val s2_xcpt = s2_xcpts.map(_._1).orR
val s2_cause = PriorityMux(s2_xcpts)
val s2_go_to_itc = WireInit(s2_inst.vmu && s2_iterative)
val s2_generate_xcpt = WireInit(s2_xcpt)
// masked checks, even in the fast case, need to
  // go to ITC to get the precise element+address of the trap
when (s2_inst.vmu && s2_xcpt && !s2_inst.vm) {
s2_go_to_itc := true.B
s2_generate_xcpt := false.B
}
io.s2.inst.valid := s2_valid
io.s2.inst.bits := s2_inst
io.s2.replay := false.B
io.s2.vstart.valid := false.B
io.s2.vstart.bits := 0.U
io.s2.retire := false.B
io.s2.internal_replay.valid := false.B
io.s2.internal_replay.bits := s2_inst
io.s2.internal_replay.bits.rm := Mux(s2_inst.isOpf, io.s2.frm, io.s2.vxrm)
io.s2.xcpt.valid := false.B
io.s2.xcpt.bits.cause := s2_cause
io.s2.xcpt.bits.tval := s2_base
io.s2.pc := s2_inst.pc
io.s2.issue.valid := false.B
io.s2.issue.bits := s2_inst
io.s2.issue.bits.segstart := 0.U
io.s2.issue.bits.segend := s2_inst.seg_nf
io.s2.issue.bits.rm := Mux(s2_inst.isOpf, io.s2.frm, io.s2.vxrm)
io.s2.issue.bits.page := s2_tlb_resp.paddr >> pgIdxBits
val consumed = ((1 << pgIdxBits).U - s2_tlb_resp.paddr(pgIdxBits-1,0)) >> s2_inst.mem_elem_size
when (s2_inst.vmu && s2_replay_next_page) {
io.s2.issue.bits.vconfig.vl := s2_inst.vstart +& consumed
}
when (s2_valid) {
when (!io.s2.issue.ready || (io.s2.scalar_store_pending && s2_inst.vmu)) {
io.s2.replay := true.B
} .elsewhen (s2_inst.vstart =/= 0.U && !s2_inst.vmu) {
io.s2.xcpt.valid := true.B
io.s2.xcpt.bits.cause := Causes.illegal_instruction.U
io.s2.xcpt.bits.tval := s2_inst.pc
} .elsewhen (s2_inst.vstart >= s2_inst.vconfig.vl) {
io.s2.retire := true.B
io.s2.issue.valid := true.B
io.s2.vstart.valid := true.B
} .elsewhen (s2_tlb_resp.miss) {
io.s2.replay := true.B
} .elsewhen (s2_generate_xcpt) {
io.s2.xcpt.valid := true.B
} .elsewhen (s2_inst.vmu && s2_fast_sg) {
io.s2.retire := true.B
io.s2.issue.valid := true.B
io.s2.issue.bits.fast_sg := true.B
io.s2.vstart.valid := true.B
} .elsewhen (s2_go_to_itc) {
io.s2.internal_replay.valid := true.B
} .elsewhen (s2_replay_next_page) {
io.s2.replay := true.B
io.s2.issue.valid := true.B
io.s2.vstart.valid := true.B
io.s2.vstart.bits := s2_inst.vstart +& consumed
} .otherwise {
io.s2.retire := true.B
io.s2.vstart.valid := true.B
io.s2.issue.valid := true.B
}
}
}
| module EarlyTrapCheck( // @[EarlyTrapCheck.scala:15:7]
input clock, // @[EarlyTrapCheck.scala:15:7]
input reset, // @[EarlyTrapCheck.scala:15:7]
output io_busy, // @[EarlyTrapCheck.scala:21:14]
input io_s0_in_valid, // @[EarlyTrapCheck.scala:21:14]
input [31:0] io_s0_in_bits_inst, // @[EarlyTrapCheck.scala:21:14]
input [39:0] io_s0_in_bits_pc, // @[EarlyTrapCheck.scala:21:14]
input [1:0] io_s0_in_bits_status_prv, // @[EarlyTrapCheck.scala:21:14]
input [7:0] io_s0_in_bits_vconfig_vl, // @[EarlyTrapCheck.scala:21:14]
input io_s0_in_bits_vconfig_vtype_vill, // @[EarlyTrapCheck.scala:21:14]
input [54:0] io_s0_in_bits_vconfig_vtype_reserved, // @[EarlyTrapCheck.scala:21:14]
input io_s0_in_bits_vconfig_vtype_vma, // @[EarlyTrapCheck.scala:21:14]
input io_s0_in_bits_vconfig_vtype_vta, // @[EarlyTrapCheck.scala:21:14]
input [2:0] io_s0_in_bits_vconfig_vtype_vsew, // @[EarlyTrapCheck.scala:21:14]
input io_s0_in_bits_vconfig_vtype_vlmul_sign, // @[EarlyTrapCheck.scala:21:14]
input [1:0] io_s0_in_bits_vconfig_vtype_vlmul_mag, // @[EarlyTrapCheck.scala:21:14]
input [6:0] io_s0_in_bits_vstart, // @[EarlyTrapCheck.scala:21:14]
input [63:0] io_s0_in_bits_rs1, // @[EarlyTrapCheck.scala:21:14]
input [63:0] io_s0_in_bits_rs2, // @[EarlyTrapCheck.scala:21:14]
output io_s0_tlb_req_valid, // @[EarlyTrapCheck.scala:21:14]
output [39:0] io_s0_tlb_req_bits_vaddr, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s0_tlb_req_bits_size, // @[EarlyTrapCheck.scala:21:14]
output [4:0] io_s0_tlb_req_bits_cmd, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s0_tlb_req_bits_prv, // @[EarlyTrapCheck.scala:21:14]
output [31:0] io_s1_inst_bits, // @[EarlyTrapCheck.scala:21:14]
input io_s1_rs1_valid, // @[EarlyTrapCheck.scala:21:14]
input [63:0] io_s1_rs1_bits, // @[EarlyTrapCheck.scala:21:14]
input io_s1_kill, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_miss, // @[EarlyTrapCheck.scala:21:14]
input [31:0] io_s1_tlb_resp_paddr, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_pf_ld, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_pf_st, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_ae_ld, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_ae_st, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_ma_ld, // @[EarlyTrapCheck.scala:21:14]
input io_s1_tlb_resp_ma_st, // @[EarlyTrapCheck.scala:21:14]
input io_s2_scalar_store_pending, // @[EarlyTrapCheck.scala:21:14]
output io_s2_inst_valid, // @[EarlyTrapCheck.scala:21:14]
output [31:0] io_s2_inst_bits_bits, // @[EarlyTrapCheck.scala:21:14]
output io_s2_replay, // @[EarlyTrapCheck.scala:21:14]
output io_s2_vstart_valid, // @[EarlyTrapCheck.scala:21:14]
output [6:0] io_s2_vstart_bits, // @[EarlyTrapCheck.scala:21:14]
output io_s2_retire, // @[EarlyTrapCheck.scala:21:14]
output io_s2_xcpt_valid, // @[EarlyTrapCheck.scala:21:14]
output [63:0] io_s2_xcpt_bits_cause, // @[EarlyTrapCheck.scala:21:14]
output [39:0] io_s2_xcpt_bits_tval, // @[EarlyTrapCheck.scala:21:14]
output [39:0] io_s2_pc, // @[EarlyTrapCheck.scala:21:14]
output io_s2_internal_replay_valid, // @[EarlyTrapCheck.scala:21:14]
output [39:0] io_s2_internal_replay_bits_pc, // @[EarlyTrapCheck.scala:21:14]
output [31:0] io_s2_internal_replay_bits_bits, // @[EarlyTrapCheck.scala:21:14]
output [7:0] io_s2_internal_replay_bits_vconfig_vl, // @[EarlyTrapCheck.scala:21:14]
output io_s2_internal_replay_bits_vconfig_vtype_vill, // @[EarlyTrapCheck.scala:21:14]
output [54:0] io_s2_internal_replay_bits_vconfig_vtype_reserved, // @[EarlyTrapCheck.scala:21:14]
output io_s2_internal_replay_bits_vconfig_vtype_vma, // @[EarlyTrapCheck.scala:21:14]
output io_s2_internal_replay_bits_vconfig_vtype_vta, // @[EarlyTrapCheck.scala:21:14]
output [2:0] io_s2_internal_replay_bits_vconfig_vtype_vsew, // @[EarlyTrapCheck.scala:21:14]
output io_s2_internal_replay_bits_vconfig_vtype_vlmul_sign, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s2_internal_replay_bits_vconfig_vtype_vlmul_mag, // @[EarlyTrapCheck.scala:21:14]
output [6:0] io_s2_internal_replay_bits_vstart, // @[EarlyTrapCheck.scala:21:14]
output [63:0] io_s2_internal_replay_bits_rs1_data, // @[EarlyTrapCheck.scala:21:14]
output [63:0] io_s2_internal_replay_bits_rs2_data, // @[EarlyTrapCheck.scala:21:14]
output [2:0] io_s2_internal_replay_bits_rm, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s2_internal_replay_bits_emul, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s2_internal_replay_bits_mop, // @[EarlyTrapCheck.scala:21:14]
input io_s2_issue_ready, // @[EarlyTrapCheck.scala:21:14]
output io_s2_issue_valid, // @[EarlyTrapCheck.scala:21:14]
output [31:0] io_s2_issue_bits_bits, // @[EarlyTrapCheck.scala:21:14]
output [7:0] io_s2_issue_bits_vconfig_vl, // @[EarlyTrapCheck.scala:21:14]
output [2:0] io_s2_issue_bits_vconfig_vtype_vsew, // @[EarlyTrapCheck.scala:21:14]
output io_s2_issue_bits_vconfig_vtype_vlmul_sign, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s2_issue_bits_vconfig_vtype_vlmul_mag, // @[EarlyTrapCheck.scala:21:14]
output [6:0] io_s2_issue_bits_vstart, // @[EarlyTrapCheck.scala:21:14]
output [2:0] io_s2_issue_bits_segend, // @[EarlyTrapCheck.scala:21:14]
output [63:0] io_s2_issue_bits_rs1_data, // @[EarlyTrapCheck.scala:21:14]
output [63:0] io_s2_issue_bits_rs2_data, // @[EarlyTrapCheck.scala:21:14]
output [19:0] io_s2_issue_bits_page, // @[EarlyTrapCheck.scala:21:14]
output [2:0] io_s2_issue_bits_rm, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s2_issue_bits_emul, // @[EarlyTrapCheck.scala:21:14]
output [1:0] io_s2_issue_bits_mop, // @[EarlyTrapCheck.scala:21:14]
input [1:0] io_s2_vxrm, // @[EarlyTrapCheck.scala:21:14]
input [2:0] io_s2_frm // @[EarlyTrapCheck.scala:21:14]
);
reg s1_valid; // @[EarlyTrapCheck.scala:65:25]
reg s2_valid; // @[EarlyTrapCheck.scala:66:25]
wire io_busy_0 = s1_valid | s2_valid; // @[EarlyTrapCheck.scala:65:25, :66:25, :67:23]
wire [6:0] s0_inst_vstart = io_busy_0 ? 7'h0 : io_s0_in_bits_vstart; // @[EarlyTrapCheck.scala:67:23, :73:26]
wire _GEN = io_s0_in_bits_inst[6:0] == 7'h7 | io_s0_in_bits_inst[6:0] == 7'h27; // @[Bundles.scala:56:20]
wire _s0_unit_T = io_s0_in_bits_inst[27:26] == 2'h0; // @[EarlyTrapCheck.scala:85:36]
wire _GEN_0 = _GEN & _s0_unit_T; // @[EarlyTrapCheck.scala:85:{21,36}]
wire [3:0] _GEN_1 = {1'h0, io_s0_in_bits_inst[31:29]}; // @[EarlyTrapCheck.scala:87:77]
wire [7:0] s0_inst_vconfig_vl = _GEN_0 ? (io_s0_in_bits_inst[24:20] == 5'hB ? {3'h0, io_s0_in_bits_vconfig_vl[7:3] + {4'h0, |(io_s0_in_bits_vconfig_vl[2:0])}} : io_s0_in_bits_inst[24:20] == 5'h8 ? (8'h80 >> {1'h0, io_s0_in_bits_inst[26] ? io_s0_in_bits_vconfig_vtype_vsew : {1'h0, io_s0_in_bits_inst[13:12]}} + 4'h3) * {4'h0, _GEN_1 + 4'h1} : io_s0_in_bits_vconfig_vl) : io_s0_in_bits_vconfig_vl; // @[EarlyTrapCheck.scala:15:7, :72:19, :85:{21,49}, :86:{45,51,81,87}, :87:{28,54,63,77}, :88:{24,76}]
wire [2:0] _GEN_2 = {1'h0, io_s0_in_bits_inst[13:12]}; // @[Bundles.scala:59:{26,59}]
wire s0_unit = _s0_unit_T | io_s0_in_bits_inst[27:26] == 2'h2 & io_s0_in_bits_rs2 == {53'h0, {7'h0, _GEN_1 + 4'h1} << (io_s0_in_bits_inst[26] ? io_s0_in_bits_vconfig_vtype_vsew : _GEN_2)}; // @[EarlyTrapCheck.scala:85:36, :87:77, :99:{41,57,72,93,110,118}]
wire [63:0] _s0_base_T_13 = io_s0_in_bits_rs1 + {46'h0, {7'h0, {7'h0, {1'h0, ~(|(io_s0_in_bits_inst[27:26])) & io_s0_in_bits_inst[24:20] == 5'h8 ? 3'h0 : io_s0_in_bits_inst[31:29]} + 4'h1} * {4'h0, s0_inst_vstart}} << (io_s0_in_bits_inst[26] ? io_s0_in_bits_vconfig_vtype_vsew : _GEN_2)}; // @[EarlyTrapCheck.scala:15:7, :73:26, :101:{36,56,64,86}]
wire [63:0] _s0_bound_T_15 = io_s0_in_bits_rs1 + {45'h0, {7'h0, {8'h0, {1'h0, ~(|(io_s0_in_bits_inst[27:26])) & io_s0_in_bits_inst[24:20] == 5'h8 ? 3'h0 : io_s0_in_bits_inst[31:29]} + 4'h1} * {4'h0, s0_inst_vconfig_vl}} << (io_s0_in_bits_inst[26] ? io_s0_in_bits_vconfig_vtype_vsew : _GEN_2)} - 64'h1; // @[EarlyTrapCheck.scala:15:7, :72:19, :85:49, :87:63, :88:24, :102:{36,56,64,86,112}]
wire _s0_iterative_T = _s0_base_T_13[63:12] != _s0_bound_T_15[63:12]; // @[EarlyTrapCheck.scala:101:36, :102:{36,112}, :103:{33,47,61}, :104:77]
wire s0_replay_next_page = (io_s0_in_bits_inst[6:0] == 7'h7 | io_s0_in_bits_inst[6:0] == 7'h27) & s0_unit & io_s0_in_bits_inst[31:29] == 3'h0 & _s0_iterative_T; // @[EarlyTrapCheck.scala:99:41, :103:47, :104:{41,52,66,74,77}]
wire s0_iterative = (_s0_iterative_T | ~s0_unit | io_s0_in_bits_inst[24:20] == 5'h10) & ~s0_replay_next_page; // @[EarlyTrapCheck.scala:15:7, :99:41, :103:47, :104:{41,52,74,77}, :105:{39,42,51,67,80,83}]
wire s0_tlb_valid = ~s0_iterative & (io_s0_in_bits_inst[6:0] == 7'h7 | io_s0_in_bits_inst[6:0] == 7'h27) & {1'h0, s0_inst_vstart} < s0_inst_vconfig_vl; // @[EarlyTrapCheck.scala:72:19, :73:26, :85:49, :88:24, :105:80, :110:{22,36,51,69}]
reg [39:0] s1_inst_pc; // @[EarlyTrapCheck.scala:122:38]
reg [31:0] s1_inst_bits; // @[EarlyTrapCheck.scala:122:38]
reg [7:0] s1_inst_vconfig_vl; // @[EarlyTrapCheck.scala:122:38]
reg s1_inst_vconfig_vtype_vill; // @[EarlyTrapCheck.scala:122:38]
reg [54:0] s1_inst_vconfig_vtype_reserved; // @[EarlyTrapCheck.scala:122:38]
reg s1_inst_vconfig_vtype_vma; // @[EarlyTrapCheck.scala:122:38]
reg s1_inst_vconfig_vtype_vta; // @[EarlyTrapCheck.scala:122:38]
reg [2:0] s1_inst_vconfig_vtype_vsew; // @[EarlyTrapCheck.scala:122:38]
reg s1_inst_vconfig_vtype_vlmul_sign; // @[EarlyTrapCheck.scala:122:38]
reg [1:0] s1_inst_vconfig_vtype_vlmul_mag; // @[EarlyTrapCheck.scala:122:38]
reg [6:0] s1_inst_vstart; // @[EarlyTrapCheck.scala:122:38]
reg [63:0] s1_inst_rs1_data; // @[EarlyTrapCheck.scala:122:38]
reg [63:0] s1_inst_rs2_data; // @[EarlyTrapCheck.scala:122:38]
reg [1:0] s1_inst_emul; // @[EarlyTrapCheck.scala:122:38]
reg [1:0] s1_inst_mop; // @[EarlyTrapCheck.scala:122:38]
reg s1_iterative; // @[EarlyTrapCheck.scala:123:38]
reg s1_replay_next_page; // @[EarlyTrapCheck.scala:124:38]
reg [63:0] s1_base; // @[EarlyTrapCheck.scala:125:38]
reg s1_tlb_valid; // @[EarlyTrapCheck.scala:126:38]
reg [39:0] s2_inst_pc; // @[EarlyTrapCheck.scala:143:20]
reg [31:0] s2_inst_bits; // @[EarlyTrapCheck.scala:143:20]
reg [7:0] s2_inst_vconfig_vl; // @[EarlyTrapCheck.scala:143:20]
reg s2_inst_vconfig_vtype_vill; // @[EarlyTrapCheck.scala:143:20]
reg [54:0] s2_inst_vconfig_vtype_reserved; // @[EarlyTrapCheck.scala:143:20]
reg s2_inst_vconfig_vtype_vma; // @[EarlyTrapCheck.scala:143:20]
reg s2_inst_vconfig_vtype_vta; // @[EarlyTrapCheck.scala:143:20]
reg [2:0] s2_inst_vconfig_vtype_vsew; // @[EarlyTrapCheck.scala:143:20]
reg s2_inst_vconfig_vtype_vlmul_sign; // @[EarlyTrapCheck.scala:143:20]
reg [1:0] s2_inst_vconfig_vtype_vlmul_mag; // @[EarlyTrapCheck.scala:143:20]
reg [6:0] s2_inst_vstart; // @[EarlyTrapCheck.scala:143:20]
reg [63:0] s2_inst_rs1_data; // @[EarlyTrapCheck.scala:143:20]
reg [63:0] s2_inst_rs2_data; // @[EarlyTrapCheck.scala:143:20]
reg [1:0] s2_inst_emul; // @[EarlyTrapCheck.scala:143:20]
reg [1:0] s2_inst_mop; // @[EarlyTrapCheck.scala:143:20]
reg [63:0] s2_base; // @[EarlyTrapCheck.scala:144:26]
reg s2_iterative; // @[EarlyTrapCheck.scala:145:38]
reg s2_replay_next_page; // @[EarlyTrapCheck.scala:147:38]
reg s2_tlb_resp_miss; // @[EarlyTrapCheck.scala:152:30]
reg [31:0] s2_tlb_resp_paddr; // @[EarlyTrapCheck.scala:152:30]
reg s2_tlb_resp_pf_ld; // @[EarlyTrapCheck.scala:152:30]
reg s2_tlb_resp_pf_st; // @[EarlyTrapCheck.scala:152:30]
reg s2_tlb_resp_ae_ld; // @[EarlyTrapCheck.scala:152:30]
reg s2_tlb_resp_ae_st; // @[EarlyTrapCheck.scala:152:30]
reg s2_tlb_resp_ma_ld; // @[EarlyTrapCheck.scala:152:30]
reg s2_tlb_resp_ma_st; // @[EarlyTrapCheck.scala:152:30]
wire s2_xcpt = s2_tlb_resp_pf_st | s2_tlb_resp_pf_ld | s2_tlb_resp_ae_st | s2_tlb_resp_ae_ld | s2_tlb_resp_ma_st | s2_tlb_resp_ma_ld; // @[EarlyTrapCheck.scala:152:30]
wire _GEN_3 = s2_inst_bits[6:0] == 7'h7 | s2_inst_bits[6:0] == 7'h27; // @[EarlyTrapCheck.scala:143:20]
wire _GEN_4 = _GEN_3 & s2_xcpt & ~(s2_inst_bits[25]); // @[EarlyTrapCheck.scala:143:20, :171:{21,32,35}]
wire s2_go_to_itc = _GEN_4 | (s2_inst_bits[6:0] == 7'h7 | s2_inst_bits[6:0] == 7'h27) & s2_iterative; // @[EarlyTrapCheck.scala:143:20, :145:38, :166:{30,43}, :171:{21,32,48}, :172:18]
wire s2_generate_xcpt = ~_GEN_4 & s2_xcpt; // @[EarlyTrapCheck.scala:167:34, :171:{21,32,48}, :173:22]
wire [2:0] _GEN_5 = {1'h0, io_s2_vxrm}; // @[EarlyTrapCheck.scala:184:39]
wire [12:0] consumed = 13'h1000 - {1'h0, s2_tlb_resp_paddr[11:0]} >> (s2_inst_mop[0] ? s2_inst_vconfig_vtype_vsew : {1'h0, s2_inst_bits[13:12]}); // @[EarlyTrapCheck.scala:143:20, :152:30, :196:{38,57,74}]
wire [7:0] _GEN_6 = {1'h0, s2_inst_vstart}; // @[EarlyTrapCheck.scala:143:20, :198:51]
wire _GEN_7 = ~io_s2_issue_ready | io_s2_scalar_store_pending & _GEN_3; // @[EarlyTrapCheck.scala:202:{11,30,61}]
wire _GEN_8 = (|s2_inst_vstart) & ~_GEN_3; // @[EarlyTrapCheck.scala:143:20, :204:{33,41,44}]
wire _GEN_9 = ~s2_valid | _GEN_7 | ~_GEN_8; // @[EarlyTrapCheck.scala:66:25, :186:29, :201:19, :202:{30,78}, :204:{41,58}]
wire _GEN_10 = _GEN_6 >= s2_inst_vconfig_vl; // @[EarlyTrapCheck.scala:143:20, :198:51, :208:33]
wire _GEN_11 = s2_tlb_resp_miss | s2_generate_xcpt; // @[EarlyTrapCheck.scala:152:30, :167:34, :171:48, :173:22, :182:33, :212:36, :214:36, :216:45, :221:32]
wire _GEN_12 = s2_generate_xcpt | s2_go_to_itc; // @[EarlyTrapCheck.scala:166:30, :167:34, :171:48, :172:18, :173:22, :178:22, :214:36, :216:45, :221:32, :223:39]
wire _GEN_13 = _GEN_7 | _GEN_8; // @[EarlyTrapCheck.scala:181:22, :202:{30,78}, :204:{41,58}, :208:56]
wire io_s2_issue_valid_0 = s2_valid & ~_GEN_13 & (_GEN_10 | ~_GEN_11 & ~s2_go_to_itc); // @[EarlyTrapCheck.scala:66:25, :166:30, :171:48, :172:18, :179:22, :181:22, :182:33, :201:19, :202:78, :204:58, :208:{33,56}, :211:26, :212:36, :214:36, :216:45, :221:32, :223:39]
wire [7:0][1:0] _GEN_14 = '{2'h3, 2'h3, 2'h3, 2'h3, 2'h2, 2'h2, 2'h1, 2'h0};
always @(posedge clock) begin // @[EarlyTrapCheck.scala:15:7]
if (reset) begin // @[EarlyTrapCheck.scala:15:7]
s1_valid <= 1'h0; // @[EarlyTrapCheck.scala:65:25]
s2_valid <= 1'h0; // @[EarlyTrapCheck.scala:66:25]
end
else begin // @[EarlyTrapCheck.scala:15:7]
s1_valid <= io_s0_in_valid; // @[EarlyTrapCheck.scala:65:25]
s2_valid <= s1_valid & ~io_s1_kill; // @[EarlyTrapCheck.scala:65:25, :66:25, :142:{24,27}]
end
if (io_s0_in_valid) begin // @[EarlyTrapCheck.scala:21:14]
s1_inst_pc <= io_s0_in_bits_pc; // @[EarlyTrapCheck.scala:122:38]
s1_inst_bits <= io_s0_in_bits_inst; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vl <= s0_inst_vconfig_vl; // @[EarlyTrapCheck.scala:72:19, :85:49, :88:24, :122:38]
s1_inst_vconfig_vtype_vill <= io_s0_in_bits_vconfig_vtype_vill; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vtype_reserved <= io_s0_in_bits_vconfig_vtype_reserved; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vtype_vma <= io_s0_in_bits_vconfig_vtype_vma; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vtype_vta <= io_s0_in_bits_vconfig_vtype_vta; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vtype_vsew <= io_s0_in_bits_vconfig_vtype_vsew; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vtype_vlmul_sign <= io_s0_in_bits_vconfig_vtype_vlmul_sign; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vconfig_vtype_vlmul_mag <= io_s0_in_bits_vconfig_vtype_vlmul_mag; // @[EarlyTrapCheck.scala:122:38]
s1_inst_vstart <= s0_inst_vstart; // @[EarlyTrapCheck.scala:73:26, :122:38]
s1_inst_rs1_data <= io_s0_in_bits_rs1; // @[EarlyTrapCheck.scala:122:38]
s1_inst_rs2_data <= io_s0_in_bits_rs2; // @[EarlyTrapCheck.scala:122:38]
s1_inst_emul <= ~_GEN & io_s0_in_bits_inst[14:12] == 3'h3 & io_s0_in_bits_inst[31:26] == 6'h27 ? _GEN_14[io_s0_in_bits_inst[17:15]] : _GEN_0 & io_s0_in_bits_inst[24:20] == 5'h8 ? _GEN_14[io_s0_in_bits_inst[31:29]] : io_s0_in_bits_vconfig_vtype_vlmul_sign ? 2'h0 : io_s0_in_bits_vconfig_vtype_vlmul_mag; // @[EarlyTrapCheck.scala:15:7, :78:{20,26}, :85:{21,49}, :91:{24,40}, :92:20, :95:{9,22,40,50,68,100}, :96:18, :122:38]
s1_inst_mop <= io_s0_in_bits_inst[27:26]; // @[EarlyTrapCheck.scala:122:38]
s1_iterative <= s0_iterative; // @[EarlyTrapCheck.scala:105:80, :123:38]
s1_replay_next_page <= s0_replay_next_page; // @[EarlyTrapCheck.scala:104:{41,52,74}, :124:38]
s1_base <= _s0_base_T_13; // @[EarlyTrapCheck.scala:101:36, :125:38]
s1_tlb_valid <= s0_tlb_valid; // @[EarlyTrapCheck.scala:110:{36,51}, :126:38]
end
if (s1_valid) begin // @[EarlyTrapCheck.scala:65:25]
s2_inst_pc <= s1_inst_pc; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_bits <= s1_inst_bits; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vl <= s1_inst_vconfig_vl; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_vill <= s1_inst_vconfig_vtype_vill; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_reserved <= s1_inst_vconfig_vtype_reserved; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_vma <= s1_inst_vconfig_vtype_vma; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_vta <= s1_inst_vconfig_vtype_vta; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_vsew <= s1_inst_vconfig_vtype_vsew; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_vlmul_sign <= s1_inst_vconfig_vtype_vlmul_sign; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vconfig_vtype_vlmul_mag <= s1_inst_vconfig_vtype_vlmul_mag; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_vstart <= s1_inst_vstart; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_rs1_data <= io_s1_rs1_valid ? io_s1_rs1_bits : s1_inst_rs1_data; // @[EarlyTrapCheck.scala:122:38, :143:20, :149:13, :150:{28,47}]
s2_inst_rs2_data <= s1_inst_rs2_data; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_emul <= s1_inst_emul; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_inst_mop <= s1_inst_mop; // @[EarlyTrapCheck.scala:122:38, :143:20]
s2_base <= s1_base; // @[EarlyTrapCheck.scala:125:38, :144:26]
s2_iterative <= s1_iterative; // @[EarlyTrapCheck.scala:123:38, :145:38]
s2_replay_next_page <= s1_replay_next_page; // @[EarlyTrapCheck.scala:124:38, :147:38]
s2_tlb_resp_miss <= s1_tlb_valid & io_s1_tlb_resp_miss; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
s2_tlb_resp_paddr <= s1_tlb_valid ? io_s1_tlb_resp_paddr : 32'h0; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :132:23, :152:30]
s2_tlb_resp_pf_ld <= s1_tlb_valid & io_s1_tlb_resp_pf_ld; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
s2_tlb_resp_pf_st <= s1_tlb_valid & io_s1_tlb_resp_pf_st; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
s2_tlb_resp_ae_ld <= s1_tlb_valid & io_s1_tlb_resp_ae_ld; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
s2_tlb_resp_ae_st <= s1_tlb_valid & io_s1_tlb_resp_ae_st; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
s2_tlb_resp_ma_ld <= s1_tlb_valid & io_s1_tlb_resp_ma_ld; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
s2_tlb_resp_ma_st <= s1_tlb_valid & io_s1_tlb_resp_ma_st; // @[EarlyTrapCheck.scala:126:38, :128:37, :130:24, :131:17, :152:30]
end
always @(posedge) |
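EarlyTrapCheck's s0 stage decides whether a vector memory access stays within a single page by comparing the virtual page numbers of the access's first and last byte (s0_base and s0_bound); accesses that cross a page generally fall back to the iterative checker or a page-split replay. Below is a minimal standalone sketch of that page-crossing test, assuming 4 KiB pages and 64-bit addresses; it is not part of saturn.
import chisel3._
// Standalone sketch of the s0 single-page test: an access stays on one page
// iff the addresses of its first and last byte share a virtual page number.
class SinglePageCheck(pgIdxBits: Int = 12) extends Module {
  val io = IO(new Bundle {
    val base       = Input(UInt(64.W))  // address of the first byte accessed
    val bound      = Input(UInt(64.W))  // address of the last byte accessed
    val singlePage = Output(Bool())
  })
  io.singlePage := (io.base >> pgIdxBits) === (io.bound >> pgIdxBits)
}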
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
 * A Tile is a purely combinational 2D array of pass-through PEs.
 * The a, b, d, control, id, last, and valid inputs are broadcast across the array and passed through to the Tile's outputs.
 * @param inputType The data type of the PE inputs
 * @param outputType The data type of the PE outputs
 * @param accType The data type of the PE accumulators
 * @param rows Number of rows of PEs in the Tile
 * @param columns Number of columns of PEs in the Tile
 */
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
  // Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_146( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_402 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
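When tree_reduction is enabled, io.out_b is produced by Util.accumulateTree over the column's PE outputs plus io.in_b; that helper is defined elsewhere in gemmini and is not shown here. As an assumed illustration only, a balanced pairwise reduction of the kind such a helper presumably performs looks like this (plain Scala, names hypothetical).
// Assumed illustration, not gemmini's Util.accumulateTree: reduce a sequence
// by pairing neighbours each round, giving a log-depth adder tree rather than
// the linear chain a left fold would produce.
def reduceTree[T](xs: Seq[T])(add: (T, T) => T): T = {
  require(xs.nonEmpty, "cannot reduce an empty sequence")
  if (xs.length == 1) xs.head
  else {
    val paired = xs.grouped(2).toSeq.map {
      case Seq(a, b) => add(a, b)   // combine a neighbouring pair
      case Seq(a)    => a           // odd element passes through to the next round
    }
    reduceTree(paired)(add)
  }
}
// e.g. reduceTree(Seq(1, 2, 3, 4, 5))(_ + _) == 15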
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
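// Illustrative note (not in the original source): each source ID gets its own
// slot in the packed 'inflight_opcodes'/'inflight_sizes' registers below. A
// slot stores (value << 1) | 1, so an all-zero slot means "unset"; e.g. a Get
// (opcode 4) from source 2 is recorded as ((4 << 1) | 1) = 9, placed at bit
// offset (2 << log_a_opcode_bus_size). The lookups below reverse this by
// shifting the slot down, masking it, and dropping the low marker bit.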
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
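// Illustrative example (not part of the original source): DecoupledHelper is
// typically used to AND together a set of ready/valid conditions while
// excluding each signal from its own feedback path, avoiding combinational
// loops. The hypothetical module below is a simple pass-through written this way.
class DecoupledHelperPassthroughExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  private val helper = DecoupledHelper(io.in.valid, io.out.ready)
  io.out.valid := helper.fire(io.out.ready) // all conditions except out.ready
  io.in.ready := helper.fire(io.in.valid) // all conditions except in.valid
  io.out.bits := io.in.bits
}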
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
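// Illustrative usage sketch (not part of the original source): look up a
// (valid, data) pair by key, falling back to the default when nothing matches.
// 'sel' is a hypothetical UInt select signal.
//
// val (hit, data) = MuxTLookup(sel, (false.B, 0.U(8.W)),
// Seq(1.U -> (true.B, 0xab.U(8.W)),
// 2.U -> (true.B, 0xcd.U(8.W))))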
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
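// Illustrative usage sketch (not part of the original source): Str packs
// characters into a UInt so they can be emitted with printf's %c format,
// e.g. flagging a hypothetical 'retire' signal in simulation logs:
//
// printf("%c pc=%x\n", Mux(retire, Str('R'), Str('-')), pc)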
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
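// Illustrative example (not part of the original source): a 2-of-3 vote over
// three hypothetical redundant flags; true when at least two of them are set.
//
// val voted = Majority(Seq(flagA, flagB, flagC))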
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// e.g.: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
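// Illustrative example (not part of the original source): a hypothetical
// wrapper module that computes the byte mask for a 4-byte beat, matching the
// examples above ((0x3, 0) => 0001, (0x3, 1) => 0011, (0x3, 2) => 1111).
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(2.W))
    val lgSize = Input(UInt(2.W))
    val mask = Output(UInt(4.W))
  })
  io.mask := MaskGen(io.addr_lo, io.lgSize, beatBytes = 4)
}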
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
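// Illustrative usage sketch (not part of the original source): read a runtime
// knob with +max_cycles=NNN and use it as a soft watchdog. 'max_cycles' and
// 'cycles' are hypothetical names; 'default = 0' keeps the check disabled
// unless the plusarg is given.
//
// val limit = PlusArg("max_cycles", default = 0, docstring = "Stop after this many cycles. Off if 0.")
// when (limit =/= 0.U) { assert(cycles < limit, "max_cycles exceeded") }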
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
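// Illustrative example (not part of the original source):
// x.isOneOf(1.U, 3.U, 5.U) elaborates to (x === 1.U) || (x === 3.U) || (x === 5.U).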
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
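// Illustrative examples (not part of the original source): for a hypothetical
// queue pointer 'ptr' known to be < depth (depth a Scala Int), these wrap
// without a full divider:
//
// val nextPtr = ptr.addWrap(1.U, depth) // (ptr + 1) % depth
// val prevPtr = ptr.subWrap(1.U, depth) // (ptr - 1 + depth) % depth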
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
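// Illustrative examples (not part of the original source):
// leftOR("b00100".U(5.W)) // => "b11100": 1s propagate from the lowest set bit upward
// rightOR("b00100".U(5.W)) // => "b00111": 1s propagate from the highest set bit downward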
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
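// Illustrative example (not part of the original source):
// groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
// // => Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4)), keys in first-seen order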
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
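// Illustrative examples (not part of the original source):
// TLMessages.isA(TLMessages.Get) // true.B: Get is a legal 'A' channel opcode
// TLMessages.adResponse(TLMessages.Get) // AccessAckData, the matching 'D' response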
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and holds read-only permissions.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
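// Illustrative sketch (not part of the original source): a plain-Scala check of the
// half-open IdRange semantics defined above. The ranges below are made-up values.
object IdRangeExample {
  def main(args: Array[String]): Unit = {
    val r0 = IdRange(0, 4)            // ids 0, 1, 2, 3 ('end' is exclusive)
    val r1 = IdRange(4, 8)            // ids 4..7
    assert(!(r0 overlaps r1))         // adjacent half-open ranges do not overlap
    assert(r0 contains 3)             // the last id inside the range
    assert(!(r0 contains 4))          // 'end' itself is excluded
    assert(r0.shift(r0.size) == r1)   // shifting by the size yields the next block
  }
}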
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
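// Illustrative sketch (not part of the original source): the inclusive power-of-two
// range semantics of TransferSizes defined above, checked with made-up values.
object TransferSizesExample {
  def main(args: Array[String]): Unit = {
    val a = TransferSizes(1, 64)                   // 1..64 byte transfers
    val b = TransferSizes(8, 256)
    assert(a.contains(32))                         // powers of two within [min, max]
    assert(!a.contains(3))                         // 3 is not a power of two
    assert((a intersect b) == TransferSizes(8, 64))
    assert((a mincover b) == TransferSizes(1, 256))
    assert(TransferSizes.none.none)                // the empty range
  }
}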
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
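// Illustrative sketch (not part of the original source): base/mask address matching
// as described in the comments above, checked with made-up values.
object AddressSetExample {
  def main(args: Array[String]): Unit = {
    val dev = AddressSet(0x200, 0xff)              // one contiguous block: 0x200-0x2ff
    assert(dev.contains(BigInt(0x2a0)))
    assert(!dev.contains(BigInt(0x300)))
    assert(dev.alignment == 0x100)                 // 256-byte aligned region
    val striped = AddressSet(0x1000, 0xf0f)        // 0x1000-0x100f, 0x1100-0x110f, ...
    assert(striped.contains(BigInt(0x1105)))
    assert(!striped.contains(BigInt(0x1015)))
    // misaligned() decomposes an arbitrary [base, base+size) window into legal sets
    assert(AddressSet.misaligned(0x0, 0x300) ==
           Seq(AddressSet(0x0, 0x1ff), AddressSet(0x200, 0xff)))
  }
}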
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
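// Illustrative sketch (not part of the original source): the buffering presets
// defined above and the implicit Int => BufferParams conversion.
object BufferParamsExample {
  def main(args: Array[String]): Unit = {
    assert(BufferParams.default.depth == 2)
    assert(!BufferParams.none.isDefined && BufferParams.none.latency == 0)
    assert(BufferParams.flow.latency == 0)         // flow-through entry adds no latency
    assert(BufferParams.pipe.latency == 1)
    val fromInt: BufferParams = 3                  // uses the implicit apply(depth) above
    assert(fromInt.depth == 3 && !fromInt.flow && !fromInt.pipe)
  }
}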
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
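// Illustrative sketch (not part of the original source): the beat-count arithmetic
// behind numBeats1 above, modelled with plain Scala integers. 'beatBytes' and the
// transfer sizes are made-up values; for a data-carrying message of 2^lgSize bytes,
// numBeats1 evaluates to (2^lgSize - 1) >> log2Ceil(beatBytes).
object BeatMathExample {
  private def beats1(lgSize: Int, beatBytes: Int): Int =
    ((1 << lgSize) - 1) >> log2Ceil(beatBytes)
  def main(args: Array[String]): Unit = {
    assert(beats1(lgSize = 6, beatBytes = 8) == 7) // 64-byte burst on an 8-byte bus: 8 beats
    assert(beats1(lgSize = 3, beatBytes = 8) == 0) // single-beat message
  }
}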
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_84( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [130:0] _c_opcodes_set_T_1 = 131'h0; // @[Monitor.scala:767:54]
wire [130:0] _c_sizes_set_T_1 = 131'h0; // @[Monitor.scala:768:52]
wire [6:0] _c_opcodes_set_T = 7'h0; // @[Monitor.scala:767:79]
wire [6:0] _c_sizes_set_T = 7'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [15:0] _c_set_wo_ready_T = 16'h1; // @[OneHot.scala:58:35]
wire [15:0] _c_set_T = 16'h1; // @[OneHot.scala:58:35]
wire [39:0] c_opcodes_set = 40'h0; // @[Monitor.scala:740:34]
wire [39:0] c_sizes_set = 40'h0; // @[Monitor.scala:741:34]
wire [9:0] c_set = 10'h0; // @[Monitor.scala:738:34]
wire [9:0] c_set_wo_ready = 10'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [3:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 4'hA; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [3:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [3:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 4'hA; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_672 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_672; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_672; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_745 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_745; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg [9:0] inflight; // @[Monitor.scala:614:27]
reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [39:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [9:0] a_set; // @[Monitor.scala:626:34]
wire [9:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [39:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [39:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [6:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [6:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [6:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [6:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [6:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [6:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [6:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [6:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [6:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [39:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [39:0] _a_opcode_lookup_T_6 = {36'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [39:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[39:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [39:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [39:0] _a_size_lookup_T_6 = {36'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [39:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[39:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [15:0] _GEN_2 = 16'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [15:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [15:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_672 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [6:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [6:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [6:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [130:0] _a_opcodes_set_T_1 = {127'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[39:0] : 40'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [130:0] _a_sizes_set_T_1 = {127'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[39:0] : 40'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [9:0] d_clr; // @[Monitor.scala:664:34]
wire [9:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [39:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [39:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [15:0] _GEN_5 = 16'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_745 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire [142:0] _d_opcodes_clr_T_5 = 143'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[39:0] : 40'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [142:0] _d_sizes_clr_T_5 = 143'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[39:0] : 40'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [9:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [9:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [9:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [39:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [39:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [39:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [39:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [39:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [39:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [9:0] inflight_1; // @[Monitor.scala:726:35]
wire [9:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [39:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [39:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [39:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [39:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [39:0] _c_opcode_lookup_T_6 = {36'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [39:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[39:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [39:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [39:0] _c_size_lookup_T_6 = {36'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [39:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[39:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [9:0] d_clr_1; // @[Monitor.scala:774:34]
wire [9:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [39:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [39:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_716 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_716 & d_release_ack_1 ? _d_clr_wo_ready_T_1[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire _T_698 = _T_745 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_698 ? _d_clr_T_1[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire [142:0] _d_opcodes_clr_T_11 = 143'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_698 ? _d_opcodes_clr_T_11[39:0] : 40'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [142:0] _d_sizes_clr_T_11 = 143'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_698 ? _d_sizes_clr_T_11[39:0] : 40'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 4'h0; // @[Monitor.scala:36:7, :795:113]
wire [9:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [9:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [39:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [39:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [39:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [39:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
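  // By the Arithmetic typeclass contract (see Arithmetic.scala below), mac(m1, m2) returns
  // m1 * m2 + self, so this module computes out_d = in_a * in_b + in_c.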
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
 * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
 * @param inputType data type of the in_a operand fed to the MAC unit
 * @param outputType data type of the in_b/in_d inputs and the out_b/out_c outputs
 * @param accType data type of the accumulator (sets the shift field width and, outside the WS dataflow, the c1/c2 registers)
 * @param df the dataflow (or set of dataflows) this PE supports
 * @param max_simultaneous_matmuls maximum number of in-flight matmuls; determines the width of in_id/out_id
 */
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
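  // c1 and c2 are double-buffered: the `propagate` bit selects which of the two registers is
  // currently being drained to out_c (and reloaded from in_d), while the other one is used by the
  // MAC unit, either as the output-stationary accumulator or as the weight-stationary weight.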
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
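// For example, a minimal instance might follow the sketch below (illustrative only; `MyType` and
// `MyTypeArithmetic` are hypothetical names that do not exist in this repository). The methods to
// provide are those of ArithmeticOps further down in this file:
//
//   implicit object MyTypeArithmetic extends Arithmetic[MyType] {
//     override implicit def cast(self: MyType) = new ArithmeticOps(self) {
//       override def mac(m1: MyType, m2: MyType) = ??? // must return m1 * m2 + self
//       override def +(t: MyType) = ???
//       // ... and likewise for *, -, >>, >, identity, withWidthOf, clippedToWidthOf, relu, zero, minimum
//     }
//   }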
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
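        // Worked example (illustrative, not from the original source): for self = 6 (0b110) and u = 2,
        // point_five = self(1) = 1, zeros = ((self & 1) =/= 0) = 0, ones_digit = self(2) = 1, so r = 1
        // and the result is (6 >> 2) + 1 = 2, i.e. 1.5 rounds up to 2. On an exact tie (point_five set,
        // zeros clear) the ones_digit term rounds to the nearest even value, corresponding to the
        // round-to-nearest-even row of the vxrm table linked above.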
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating-point square-root unit, but we should use an integer square-root unit instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
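        // Addition is computed as t_resized * 1.0 + self_rec on the fused multiply-adder instantiated below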
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
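        // Negate t by flipping its IEEE sign bit, then reuse the + implementation above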
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
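        // e.g. with an 8-bit exponent (IEEE bias 127), u = 3 gives shift_exp = 124, so shift_fn encodes 2^-3 = 0.125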
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
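      // identity encodes +1.0 (sign 0, exponent = bias, significand 0); minimum encodes -infinity (sign 1, exponent all ones, significand 0)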
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_193( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request he may be N.
// Therefore, we must consult our own meta-data state to confirm he owns the line still.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
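  // last_probe: this ProbeAck completes the set of expected responses from the probed clients (minus the excluded requester)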
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
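  // On a repeat allocation the directory is not consulted again; the final meta-data computed for the previous request is reused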
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuffer:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
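  // bitOffsets lists the positions of the set bits in x, LSB first; e.g. bitOffsets(0xA) == List(1, 3)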
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
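  // clientBit maps an inner source id to a one-hot vector over the probe-capable clients; clientSource maps such a one-hot bit back to that client's first source id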
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
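  // parseAddress keeps only the address bits selected by pickMask and splits them into (tag, set, offset); expandAddress scatters them back into a full outer address, and restoreAddress re-creates the constant bits that pickMask discarded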
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module MSHR_70( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [10:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [3:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57]
wire excluded_client = 1'h0; // @[MSHR.scala:279:28]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34]
wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53]
wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [8:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [10:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64]
reg [8:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [3:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
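  // Status outputs: decide whether an incoming B-channel probe or C-channel release
  // must be blocked or may nest under this MSHR, based on metadata validity and the
  // outstanding wait flags (w_releaseack, w_*probeacklast, w_grantfirst).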
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
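  // no_wait asserts once every outstanding acknowledgement (release, probe, grant)
  // has arrived; it gates the directory writeback and allows the MSHR to be reloaded.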
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [10:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28] |
Generate the Verilog code corresponding to the following Chisel files.
File ExecutionUnit.scala:
package saturn.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import saturn.common._
class ExecutionUnit(genFUs: Seq[FunctionalUnitFactory])(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val fus = genFUs.map(gen => Module(gen.generate(p)))
val pipe_fus: Seq[PipelinedFunctionalUnit] = fus.collect { case p: PipelinedFunctionalUnit => p }
val iter_fus: Seq[IterativeFunctionalUnit] = fus.collect { case i: IterativeFunctionalUnit => i }
val pipe_depth = (pipe_fus.map(_.depth) :+ 0).max
val io = IO(new Bundle {
val iss = Flipped(Decoupled(new ExecuteMicroOp))
val iter_hazards = Output(Vec(iter_fus.size, Valid(new PipeHazard(pipe_depth))))
val iter_write = Decoupled(new VectorWrite(dLen))
val pipe_write = Output(Valid(new VectorWrite(dLen)))
val acc_write = Output(Valid(new VectorWrite(dLen)))
val scalar_write = Decoupled(new ScalarWrite)
val pipe_hazards = Output(Vec(pipe_depth, Valid(new PipeHazard(pipe_depth))))
val issue_pipe_latency = Output(UInt((log2Ceil(pipe_depth) + 1).W))
val shared_fp_req = Decoupled(new FPInput())
val shared_fp_resp = Flipped(Valid(new FPResult()))
val set_vxsat = Output(Bool())
val set_fflags = Output(Valid(UInt(5.W)))
val busy = Output(Bool())
})
val sharedFPUnits = fus.collect { case fp: HasSharedFPUIO => fp }
val hasSharedFPUnits = sharedFPUnits.size > 0
io.shared_fp_req.valid := false.B
io.shared_fp_req.bits := DontCare
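  // Arbitrate the single shared FP port between FP-capable units; a unit may only
  // issue a request while no other shared-FP unit is active, and the shared response
  // is broadcast back to all of them.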
if (sharedFPUnits.size > 0) {
val shared_fp_arb = Module(new Arbiter(new FPInput, sharedFPUnits.size))
for ((u, i) <- sharedFPUnits.zipWithIndex) {
val otherUnits = sharedFPUnits.zipWithIndex.filter(_._2 != i).map(_._1)
val other_busy = otherUnits.map(_.io_fp_active).orR
u.io_fp_req.ready := shared_fp_arb.io.in(i).ready && !other_busy
shared_fp_arb.io.in(i).valid := u.io_fp_req.valid && !other_busy
shared_fp_arb.io.in(i).bits := u.io_fp_req.bits
u.io_fp_resp := io.shared_fp_resp
}
io.shared_fp_req <> shared_fp_arb.io.out
}
val pipe_stall = WireInit(false.B)
fus.foreach { fu =>
fu.io.iss.op := io.iss.bits
fu.io.iss.valid := io.iss.valid && !pipe_stall
}
val pipe_write_hazard = WireInit(false.B)
val readies = fus.map(_.io.iss.ready)
io.iss.ready := readies.orR && !pipe_write_hazard && !pipe_stall
when (io.iss.valid) { assert(PopCount(readies) <= 1.U) }
io.issue_pipe_latency := Mux1H(pipe_fus.map(_.io.iss.ready), pipe_fus.map(_.depth.U))
val pipe_write = WireInit(false.B)
io.pipe_write.valid := false.B
io.pipe_write.bits := DontCare
io.iter_write.valid := false.B
io.iter_write.bits := DontCare
io.acc_write.valid := false.B
io.acc_write.bits := DontCare
io.busy := false.B
io.set_vxsat := fus.map(_.io.set_vxsat).orR
io.set_fflags.valid := fus.map(_.io.set_fflags.valid).orR
io.set_fflags.bits := fus.map(f => Mux(f.io.set_fflags.valid, f.io.set_fflags.bits, 0.U)).reduce(_|_)
val scalar_write_arb = Module(new Arbiter(new ScalarWrite, fus.size))
scalar_write_arb.io.in.zip(fus.map(_.io.scalar_write)).foreach { case (l, r) => l <> r }
io.scalar_write <> scalar_write_arb.io.out
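  // All pipelined units share one bank of pipeline registers: each stage carries the
  // in-flight micro-op, a one-hot select of the owning unit, and its remaining latency.
  // Issue is blocked (pipe_write_hazard) whenever an older in-flight op would reach the
  // write-back stage in the same cycle as the newly issued one.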
if (pipe_fus.size > 0) {
val pipe_iss_depth = Mux1H(pipe_fus.map(_.io.iss.ready), pipe_fus.map(_.depth.U))
val pipe_valids = Seq.fill(pipe_depth)(RegInit(false.B))
val pipe_sels = Seq.fill(pipe_depth)(Reg(UInt(pipe_fus.size.W)))
val pipe_bits = Seq.fill(pipe_depth)(Reg(new ExecuteMicroOp))
val pipe_latencies = Seq.fill(pipe_depth)(Reg(UInt(log2Ceil(pipe_depth).W)))
pipe_stall := Mux1H(pipe_sels.head, pipe_fus.map(_.io.pipe0_stall))
pipe_write_hazard := (0 until pipe_depth).map { i =>
pipe_valids(i) && pipe_latencies(i) === pipe_iss_depth
}.orR
val pipe_iss = io.iss.fire && pipe_fus.map(_.io.iss.ready).orR
when (!pipe_stall) {
pipe_valids.head := pipe_iss
when (pipe_iss) {
pipe_bits.head := io.iss.bits
pipe_latencies.head := pipe_iss_depth - 1.U
pipe_sels.head := VecInit(pipe_fus.map(_.io.iss.ready)).asUInt
}
}
for (i <- 1 until pipe_depth) {
val fire = pipe_valids(i-1) && pipe_latencies(i-1) =/= 0.U && !((i == 1).B && pipe_stall)
pipe_valids(i) := fire
when (fire) {
pipe_bits(i) := pipe_bits(i-1)
pipe_latencies(i) := pipe_latencies(i-1) - 1.U
pipe_sels(i) := pipe_sels(i-1)
}
}
for ((fu, j) <- pipe_fus.zipWithIndex) {
for (i <- 0 until fu.depth) {
fu.io.pipe(i).valid := pipe_valids(i) && pipe_sels(i)(j)
fu.io.pipe(i).bits := Mux(pipe_valids(i) && pipe_sels(i)(j),
pipe_bits(i), 0.U.asTypeOf(new ExecuteMicroOp))
}
}
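    // A stage writes back once its remaining latency reaches zero; accumulating ops are
    // steered to acc_write until their final (tail) beat, which goes to pipe_write.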
val write_sel = pipe_valids.zip(pipe_latencies).map { case (v,l) => v && l === 0.U }
val fu_sel = Mux1H(write_sel, pipe_sels)
pipe_write := write_sel.orR
when (write_sel.orR) {
val acc = Mux1H(write_sel, pipe_bits.map(_.acc))
val tail = Mux1H(write_sel, pipe_bits.map(_.tail))
io.pipe_write.valid := Mux1H(fu_sel, pipe_fus.map(_.io.write.valid)) && (!acc || tail)
io.pipe_write.bits := Mux1H(fu_sel, pipe_fus.map(_.io.write.bits))
io.acc_write.valid := acc && !tail
io.acc_write.bits := Mux1H(fu_sel, pipe_fus.map(_.io.write.bits))
}
when (pipe_valids.orR) { io.busy := true.B }
for (i <- 0 until pipe_depth) {
io.pipe_hazards(i).valid := pipe_valids(i)
io.pipe_hazards(i).bits.eg := pipe_bits(i).wvd_eg
when (pipe_latencies(i) === 0.U) { // hack to deal with compress unit
io.pipe_hazards(i).bits.eg := Mux1H(pipe_sels(i), pipe_fus.map(_.io.write.bits.eg))
}
io.pipe_hazards(i).bits.latency := pipe_latencies(i)
}
}
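  // Iterative (variable-latency) units share the iter_write port through an arbiter;
  // pipelined write-backs always take priority, and accumulating results are routed to
  // acc_write until the tail element arrives.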
if (iter_fus.size > 0) {
val iter_write_arb = Module(new Arbiter(new VectorWrite(dLen), iter_fus.size))
iter_write_arb.io.in.zip(iter_fus.map(_.io.write)).foreach { case (l,r) => l <> r }
iter_write_arb.io.out.ready := !pipe_write && io.iter_write.ready
val acc = Mux1H(iter_write_arb.io.in.map(_.fire), iter_fus.map(_.io.acc))
val tail = Mux1H(iter_write_arb.io.in.map(_.fire), iter_fus.map(_.io.tail))
io.iter_write.valid := iter_write_arb.io.out.valid && (!acc || tail) && !pipe_write
io.iter_write.bits.eg := iter_write_arb.io.out.bits.eg
io.iter_write.bits.mask := iter_write_arb.io.out.bits.mask
io.iter_write.bits.data := iter_write_arb.io.out.bits.data
when (!pipe_write) {
io.acc_write.valid := iter_write_arb.io.out.valid && acc
io.acc_write.bits.eg := Mux1H(iter_write_arb.io.in.map(_.fire), iter_fus.map(_.io.write.bits.eg))
io.acc_write.bits.data := Mux1H(iter_write_arb.io.in.map(_.fire), iter_fus.map(_.io.write.bits.data))
io.acc_write.bits.mask := Mux1H(iter_write_arb.io.in.map(_.fire), iter_fus.map(_.io.write.bits.mask))
}
when (iter_fus.map(_.io.busy).orR) { io.busy := true.B }
for (i <- 0 until iter_fus.size) {
io.iter_hazards(i) := iter_fus(i).io.hazard
}
}
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
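    // Rotate by a runtime amount: a log2(size)-stage barrel rotator that, at stage i,
    // selects between the sequence rotated by 2^i elements and the unrotated one,
    // based on bit i of the amount.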
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
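A minimal usage sketch of the helpers above (assuming they are exposed through rocket-chip's freechips.rocketchip.util package object, as in upstream rocket-chip; the module and signal names below are illustrative and not part of the original sources):

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

class UtilSketch extends Module {
  val io = IO(new Bundle {
    val wen   = Input(Bool())
    val waddr = Input(UInt(4.W))
    val wdata = Input(UInt(8.W))
    val ren   = Input(Bool())
    val raddr = Input(UInt(4.W))
    val rdata = Output(UInt(8.W))
    val idx   = Input(UInt(3.W))
    val nxt   = Output(UInt(3.W))
  })
  val mem = SyncReadMem(16, UInt(8.W))
  when (io.wen) { mem.write(io.waddr, io.wdata) }
  // readAndHold: issue the read, then hold the last read value while 'ren' stays low
  io.rdata := mem.readAndHold(io.raddr, io.ren)
  // addWrap: computes (idx + 1) % 6 without a divider
  io.nxt := io.idx.addWrap(1.U, 6)
}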
| module ExecutionUnit( // @[ExecutionUnit.scala:11:7]
input clock, // @[ExecutionUnit.scala:11:7]
input reset, // @[ExecutionUnit.scala:11:7]
output io_iss_ready, // @[ExecutionUnit.scala:19:14]
input io_iss_valid, // @[ExecutionUnit.scala:19:14]
input [6:0] io_iss_bits_eidx, // @[ExecutionUnit.scala:19:14]
input [7:0] io_iss_bits_vl, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_rvs1_data, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_rvs2_data, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_rvm_data, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_rvs1_elem, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_rvs2_elem, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_rvd_elem, // @[ExecutionUnit.scala:19:14]
input [1:0] io_iss_bits_rvs1_eew, // @[ExecutionUnit.scala:19:14]
input [1:0] io_iss_bits_rvs2_eew, // @[ExecutionUnit.scala:19:14]
input [1:0] io_iss_bits_rvd_eew, // @[ExecutionUnit.scala:19:14]
input [1:0] io_iss_bits_vd_eew, // @[ExecutionUnit.scala:19:14]
input [7:0] io_iss_bits_rmask, // @[ExecutionUnit.scala:19:14]
input [7:0] io_iss_bits_wmask, // @[ExecutionUnit.scala:19:14]
input [63:0] io_iss_bits_full_tail_mask, // @[ExecutionUnit.scala:19:14]
input [5:0] io_iss_bits_wvd_eg, // @[ExecutionUnit.scala:19:14]
input [2:0] io_iss_bits_funct3, // @[ExecutionUnit.scala:19:14]
input [5:0] io_iss_bits_funct6, // @[ExecutionUnit.scala:19:14]
input [4:0] io_iss_bits_rs1, // @[ExecutionUnit.scala:19:14]
input [4:0] io_iss_bits_rs2, // @[ExecutionUnit.scala:19:14]
input [4:0] io_iss_bits_rd, // @[ExecutionUnit.scala:19:14]
input io_iss_bits_vm, // @[ExecutionUnit.scala:19:14]
input io_iss_bits_head, // @[ExecutionUnit.scala:19:14]
input io_iss_bits_tail, // @[ExecutionUnit.scala:19:14]
input io_iss_bits_acc, // @[ExecutionUnit.scala:19:14]
input [2:0] io_iss_bits_rm, // @[ExecutionUnit.scala:19:14]
output io_iter_hazards_0_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_iter_hazards_0_bits_eg, // @[ExecutionUnit.scala:19:14]
output io_iter_hazards_1_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_iter_hazards_1_bits_eg, // @[ExecutionUnit.scala:19:14]
input io_iter_write_ready, // @[ExecutionUnit.scala:19:14]
output io_iter_write_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_iter_write_bits_eg, // @[ExecutionUnit.scala:19:14]
output [63:0] io_iter_write_bits_data, // @[ExecutionUnit.scala:19:14]
output [63:0] io_iter_write_bits_mask, // @[ExecutionUnit.scala:19:14]
output io_pipe_write_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_pipe_write_bits_eg, // @[ExecutionUnit.scala:19:14]
output [63:0] io_pipe_write_bits_data, // @[ExecutionUnit.scala:19:14]
output [63:0] io_pipe_write_bits_mask, // @[ExecutionUnit.scala:19:14]
output io_acc_write_valid, // @[ExecutionUnit.scala:19:14]
output [63:0] io_acc_write_bits_data, // @[ExecutionUnit.scala:19:14]
output [63:0] io_acc_write_bits_mask, // @[ExecutionUnit.scala:19:14]
input io_scalar_write_ready, // @[ExecutionUnit.scala:19:14]
output io_scalar_write_valid, // @[ExecutionUnit.scala:19:14]
output [63:0] io_scalar_write_bits_data, // @[ExecutionUnit.scala:19:14]
output io_scalar_write_bits_fp, // @[ExecutionUnit.scala:19:14]
output [1:0] io_scalar_write_bits_size, // @[ExecutionUnit.scala:19:14]
output [4:0] io_scalar_write_bits_rd, // @[ExecutionUnit.scala:19:14]
output io_pipe_hazards_0_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_pipe_hazards_0_bits_eg, // @[ExecutionUnit.scala:19:14]
output io_pipe_hazards_1_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_pipe_hazards_1_bits_eg, // @[ExecutionUnit.scala:19:14]
output io_pipe_hazards_2_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_pipe_hazards_2_bits_eg, // @[ExecutionUnit.scala:19:14]
output io_pipe_hazards_3_valid, // @[ExecutionUnit.scala:19:14]
output [5:0] io_pipe_hazards_3_bits_eg, // @[ExecutionUnit.scala:19:14]
input io_shared_fp_req_ready, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_valid, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_ren2, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_ren3, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_swap23, // @[ExecutionUnit.scala:19:14]
output [1:0] io_shared_fp_req_bits_typeTagIn, // @[ExecutionUnit.scala:19:14]
output [1:0] io_shared_fp_req_bits_typeTagOut, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_fromint, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_toint, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_fastpipe, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_fma, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_div, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_sqrt, // @[ExecutionUnit.scala:19:14]
output io_shared_fp_req_bits_wflags, // @[ExecutionUnit.scala:19:14]
output [2:0] io_shared_fp_req_bits_rm, // @[ExecutionUnit.scala:19:14]
output [1:0] io_shared_fp_req_bits_fmaCmd, // @[ExecutionUnit.scala:19:14]
output [1:0] io_shared_fp_req_bits_typ, // @[ExecutionUnit.scala:19:14]
output [64:0] io_shared_fp_req_bits_in1, // @[ExecutionUnit.scala:19:14]
output [64:0] io_shared_fp_req_bits_in2, // @[ExecutionUnit.scala:19:14]
output [64:0] io_shared_fp_req_bits_in3, // @[ExecutionUnit.scala:19:14]
input io_shared_fp_resp_valid, // @[ExecutionUnit.scala:19:14]
input [64:0] io_shared_fp_resp_bits_data, // @[ExecutionUnit.scala:19:14]
output io_set_vxsat, // @[ExecutionUnit.scala:19:14]
output io_busy // @[ExecutionUnit.scala:19:14]
);
wire pipe_write_hazard; // @[package.scala:81:59]
wire _pipe_stall_T_13; // @[Mux.scala:30:73]
wire _iter_write_arb_io_in_0_ready; // @[ExecutionUnit.scala:152:32]
wire _iter_write_arb_io_in_1_ready; // @[ExecutionUnit.scala:152:32]
wire _iter_write_arb_io_out_valid; // @[ExecutionUnit.scala:152:32]
wire _scalar_write_arb_io_in_4_ready; // @[ExecutionUnit.scala:84:32]
wire _shared_fp_arb_io_in_0_ready; // @[ExecutionUnit.scala:44:31]
wire _shared_fp_arb_io_in_1_ready; // @[ExecutionUnit.scala:44:31]
wire _fus_8_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_8_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_8_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_8_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_acc; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_tail; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_busy; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_valid; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_ren2; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_8_io_fp_req_bits_typeTagIn; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_8_io_fp_req_bits_typeTagOut; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_fromint; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_toint; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_fastpipe; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_div; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_sqrt; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_req_bits_wflags; // @[ExecutionUnit.scala:12:37]
wire [2:0] _fus_8_io_fp_req_bits_rm; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_8_io_fp_req_bits_typ; // @[ExecutionUnit.scala:12:37]
wire [64:0] _fus_8_io_fp_req_bits_in1; // @[ExecutionUnit.scala:12:37]
wire [64:0] _fus_8_io_fp_req_bits_in2; // @[ExecutionUnit.scala:12:37]
wire _fus_8_io_fp_active; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_7_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_7_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_7_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_pipe0_stall; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_fp_req_valid; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_fp_req_bits_ren3; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_fp_req_bits_swap23; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_7_io_fp_req_bits_typeTagIn; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_7_io_fp_req_bits_typeTagOut; // @[ExecutionUnit.scala:12:37]
wire [2:0] _fus_7_io_fp_req_bits_rm; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_7_io_fp_req_bits_fmaCmd; // @[ExecutionUnit.scala:12:37]
wire [64:0] _fus_7_io_fp_req_bits_in1; // @[ExecutionUnit.scala:12:37]
wire [64:0] _fus_7_io_fp_req_bits_in2; // @[ExecutionUnit.scala:12:37]
wire [64:0] _fus_7_io_fp_req_bits_in3; // @[ExecutionUnit.scala:12:37]
wire _fus_7_io_fp_active; // @[ExecutionUnit.scala:12:37]
wire _fus_6_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_6_io_set_vxsat; // @[ExecutionUnit.scala:12:37]
wire _fus_6_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_6_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_6_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_6_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_5_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_5_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_5_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_5_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_5_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_4_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_4_io_scalar_write_valid; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_4_io_scalar_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire _fus_4_io_scalar_write_bits_fp; // @[ExecutionUnit.scala:12:37]
wire [1:0] _fus_4_io_scalar_write_bits_size; // @[ExecutionUnit.scala:12:37]
wire [4:0] _fus_4_io_scalar_write_bits_rd; // @[ExecutionUnit.scala:12:37]
wire _fus_4_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_4_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_4_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_4_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_3_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_3_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_3_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_3_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_3_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_3_io_busy; // @[ExecutionUnit.scala:12:37]
wire _fus_2_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_2_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_2_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_2_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_2_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_1_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_1_io_set_vxsat; // @[ExecutionUnit.scala:12:37]
wire _fus_1_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_1_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_1_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_1_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _fus_0_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire _fus_0_io_set_vxsat; // @[ExecutionUnit.scala:12:37]
wire _fus_0_io_write_valid; // @[ExecutionUnit.scala:12:37]
wire [5:0] _fus_0_io_write_bits_eg; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_0_io_write_bits_data; // @[ExecutionUnit.scala:12:37]
wire [63:0] _fus_0_io_write_bits_mask; // @[ExecutionUnit.scala:12:37]
wire _pipe_iss_T_1 = _fus_0_io_iss_ready | _fus_1_io_iss_ready; // @[ExecutionUnit.scala:12:37]
wire io_iss_ready_0 = (_pipe_iss_T_1 | _fus_2_io_iss_ready | _fus_3_io_iss_ready | _fus_4_io_iss_ready | _fus_5_io_iss_ready | _fus_6_io_iss_ready | _fus_7_io_iss_ready | _fus_8_io_iss_ready) & ~pipe_write_hazard & ~_pipe_stall_T_13; // @[Mux.scala:30:73] |
Generate the Verilog code corresponding to the following Chisel files.
File DCEQueue.scala:
package saturn.common
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.rocket.Instructions._
class DCEQueue[T <: Data](
val gen: T,
val entries: Int,
val pipe: Boolean = false,
val flow: Boolean = false)(implicit val p: Parameters) extends Module {
require(entries > -1, "Queue must have non-negative number of entries")
require(entries != 0, "Use companion object Queue.apply for zero entries")
val io = IO(new QueueIO(gen, entries, false) {
val peek = Output(Vec(entries, Valid(gen)))
})
val valids = RegInit(VecInit.fill(entries)(false.B))
val ram = Reg(Vec(entries, gen))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
val empty = ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireDefault(io.enq.fire)
val do_deq = WireDefault(io.deq.fire)
for (i <- 0 until entries) {
io.peek(i).bits := ram(i)
io.peek(i).valid := valids(i)
}
when(do_deq) {
deq_ptr.inc()
valids(deq_ptr.value) := false.B
}
when(do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
enq_ptr.inc()
}
when(do_enq =/= do_deq) {
maybe_full := do_enq
}
io.deq.valid := !empty
io.enq.ready := !full
io.deq.bits := ram(deq_ptr.value)
if (flow) {
when(io.enq.valid) { io.deq.valid := true.B }
when(empty) {
io.deq.bits := io.enq.bits
do_deq := false.B
when(io.deq.ready) { do_enq := false.B }
}
}
if (pipe) {
when(io.deq.ready) { io.enq.ready := true.B }
}
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Mux(maybe_full && ptr_match, entries.U, 0.U) | ptr_diff
} else {
io.count := Mux(
ptr_match,
Mux(maybe_full, entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)
)
}
}
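A hedged instantiation sketch for the queue above (the wrapper module and signal names are illustrative; an implicit Parameters instance is assumed to come from the surrounding design):

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import saturn.common.DCEQueue

class DCEQueueSketch(implicit p: Parameters) extends Module {
  val io = IO(new Bundle {
    val enq      = Flipped(Decoupled(UInt(8.W)))
    val deq      = Decoupled(UInt(8.W))
    val occupied = Output(Vec(4, Bool()))
  })
  val q = Module(new DCEQueue(UInt(8.W), 4, flow = true))
  q.io.enq <> io.enq
  io.deq <> q.io.deq
  // 'peek' exposes every entry together with its valid bit, e.g. for hazard checks against in-flight entries
  io.occupied := VecInit(q.io.peek.map(_.valid))
}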
| module DCEQueue_3( // @[DCEQueue.scala:12:7]
input clock, // @[DCEQueue.scala:12:7]
input reset, // @[DCEQueue.scala:12:7]
output io_enq_ready, // @[DCEQueue.scala:20:14]
input io_enq_valid, // @[DCEQueue.scala:20:14]
input [39:0] io_enq_bits_addr, // @[DCEQueue.scala:20:14]
input [63:0] io_enq_bits_data, // @[DCEQueue.scala:20:14]
input [7:0] io_enq_bits_mask, // @[DCEQueue.scala:20:14]
input [3:0] io_enq_bits_tag, // @[DCEQueue.scala:20:14]
input io_deq_ready, // @[DCEQueue.scala:20:14]
output io_deq_valid, // @[DCEQueue.scala:20:14]
output [39:0] io_deq_bits_addr, // @[DCEQueue.scala:20:14]
output [63:0] io_deq_bits_data, // @[DCEQueue.scala:20:14]
output [7:0] io_deq_bits_mask, // @[DCEQueue.scala:20:14]
output [3:0] io_deq_bits_tag // @[DCEQueue.scala:20:14]
);
reg [39:0] ram_0_addr; // @[DCEQueue.scala:24:16]
reg [63:0] ram_0_data; // @[DCEQueue.scala:24:16]
reg [7:0] ram_0_mask; // @[DCEQueue.scala:24:16]
reg [3:0] ram_0_tag; // @[DCEQueue.scala:24:16]
reg [39:0] ram_1_addr; // @[DCEQueue.scala:24:16]
reg [63:0] ram_1_data; // @[DCEQueue.scala:24:16]
reg [7:0] ram_1_mask; // @[DCEQueue.scala:24:16]
reg [3:0] ram_1_tag; // @[DCEQueue.scala:24:16]
reg wrap_1; // @[Counter.scala:61:40]
reg wrap; // @[Counter.scala:61:40]
reg maybe_full; // @[DCEQueue.scala:27:27]
wire ptr_match = wrap_1 == wrap; // @[Counter.scala:61:40]
wire empty = ptr_match & ~maybe_full; // @[DCEQueue.scala:27:27, :28:33, :29:{25,28}]
wire full = ptr_match & maybe_full; // @[DCEQueue.scala:27:27, :28:33, :30:24]
wire do_deq = io_deq_ready & ~empty; // @[Decoupled.scala:51:35]
wire do_enq = ~full & io_enq_valid; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[DCEQueue.scala:12:7]
if (do_enq & ~wrap_1) begin // @[Decoupled.scala:51:35]
ram_0_addr <= io_enq_bits_addr; // @[DCEQueue.scala:24:16]
ram_0_data <= io_enq_bits_data; // @[DCEQueue.scala:24:16]
ram_0_mask <= io_enq_bits_mask; // @[DCEQueue.scala:24:16]
ram_0_tag <= io_enq_bits_tag; // @[DCEQueue.scala:24:16]
end
if (do_enq & wrap_1) begin // @[Decoupled.scala:51:35]
ram_1_addr <= io_enq_bits_addr; // @[DCEQueue.scala:24:16]
ram_1_data <= io_enq_bits_data; // @[DCEQueue.scala:24:16]
ram_1_mask <= io_enq_bits_mask; // @[DCEQueue.scala:24:16]
ram_1_tag <= io_enq_bits_tag; // @[DCEQueue.scala:24:16]
end
if (reset) begin // @[DCEQueue.scala:12:7]
wrap_1 <= 1'h0; // @[Counter.scala:61:40]
wrap <= 1'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[DCEQueue.scala:27:27]
end
else begin // @[DCEQueue.scala:12:7]
if (do_enq) // @[Decoupled.scala:51:35]
wrap_1 <= wrap_1 - 1'h1; // @[Counter.scala:61:40, :77:24]
if (do_deq) // @[Decoupled.scala:51:35]
wrap <= wrap - 1'h1; // @[Counter.scala:61:40, :77:24]
if (~(do_enq == do_deq)) // @[Decoupled.scala:51:35]
maybe_full <= do_enq; // @[Decoupled.scala:51:35]
end
  end // always @(posedge)
  assign io_enq_ready = ~full;
  assign io_deq_valid = ~empty;
  assign io_deq_bits_addr = wrap ? ram_1_addr : ram_0_addr;
  assign io_deq_bits_data = wrap ? ram_1_data : ram_0_data;
  assign io_deq_bits_mask = wrap ? ram_1_mask : ram_0_mask;
  assign io_deq_bits_tag = wrap ? ram_1_tag : ram_0_tag;
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
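A hedged sketch of how the source/sink factories above are typically chained to move an interrupt across a clock-domain boundary (the wrapper LazyModule and node names are illustrative, and the clock-group plumbing that would actually place the sink in a different domain is omitted):

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.interrupts._

class IntCrossingSketch(implicit p: Parameters) extends LazyModule {
  val source = IntSourceNode(IntSourcePortSimple(num = 1)) // one raw interrupt line
  val sink   = IntSinkNode(IntSinkPortSimple())            // receives the synchronized line
  // register in the source domain, then 3-flop synchronize on the sink side
  sink := IntSyncAsyncCrossingSink(sync = 3) := IntSyncCrossingSource() := source
  lazy val module = new LazyModuleImp(this) {
    source.out.head._1(0) := false.B // drive the raw interrupt (tied low in this sketch)
  }
}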
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
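A hedged sketch of the two-phase pattern these classes support (class and signal names are illustrative): the outer LazyModule is constructed first, and the inner LazyModuleImp is only elaborated when .module is evaluated, which is when instantiate() resolves children and dangles.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._

class AdderLazy(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    val io = IO(new Bundle {
      val a   = Input(UInt(8.W))
      val b   = Input(UInt(8.W))
      val sum = Output(UInt(9.W))
    })
    io.sum := io.a +& io.b
  }
}

// Elaborated from a parent Impl (or a test harness) as:
//   val adder = LazyModule(new AdderLazy)
//   val adderImp = Module(adder.module)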
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Unconnected forwarded diplomatic signals are tied to DontCare for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Unconnected forwarded diplomatic signals are tied to DontCare for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
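A hedged sketch of how the in/out accessors defined above are reached from a LazyModuleImp (an illustrative adapter; interrupt nodes are used because their bundles are plain Vec[Bool]):

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.interrupts._

class IntDelaySketch(implicit p: Parameters) extends LazyModule {
  val node = IntAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    // node.in / node.out are only legal once elaboration of this Impl has begun
    // (see the 'instantiated' require above); each element pairs a bundle with its edge.
    (node.in zip node.out).foreach { case ((in, _), (out, _)) =>
      out := RegNext(in) // one-cycle delay on every interrupt line
    }
  }
}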
| module IntSyncSyncCrossingSink_n1x2_1(); // @[Crossing.scala:96:9]
wire auto_in_sync_0 = 1'h0; // @[Crossing.scala:96:9]
wire auto_in_sync_1 = 1'h0; // @[Crossing.scala:96:9]
wire auto_out_0 = 1'h0; // @[Crossing.scala:96:9]
wire auto_out_1 = 1'h0; // @[Crossing.scala:96:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nodeIn_sync_0 = 1'h0; // @[MixedNode.scala:551:17]
wire nodeIn_sync_1 = 1'h0; // @[MixedNode.scala:551:17]
wire nodeOut_0 = 1'h0; // @[MixedNode.scala:542:17]
wire nodeOut_1 = 1'h0; // @[MixedNode.scala:542:17]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
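/** A minimal usage sketch (not part of the original source): the [[Doctypeable]] typeclass and the
  * instances above let a caller recover the Verilog doctype string for a default value. The object
  * and method names here are illustrative only.
  */
private object DoctypeExample {
  import Doctypes._
  def describe[A: Doctypeable](default: Option[A]): String =
    implicitly[Doctypeable[A]].toDoctype(default)
  // describe(Some(42)) == "INT"; describe(Some("foo")) == "STRING"
}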
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
  /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42.
* Do not use this as an initial register value. The value is set in an
   * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
  /** PlusArg.timeout(name, default, docstring)(count) will use chisel3.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
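/** A minimal usage sketch (not part of the original source) of the two entry points above.
  * The module, port, and plusarg names are illustrative only.
  */
class ExamplePlusArgUser extends Module {
  val io = IO(new Bundle {
    val cycle = Input(UInt(32.W))
    val limit = Output(UInt(32.W))
  })
  // Reads +example_limit=N at the start of simulation, falling back to the default (0) otherwise.
  io.limit := PlusArg("example_limit", default = 0, docstring = "illustrative limit")
  // Kills the simulation via an assertion once io.cycle reaches +example_timeout=N; a value of 0 never fires.
  PlusArg.timeout("example_timeout", docstring = "illustrative timeout")(io.cycle)
}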
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
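/** A minimal sketch (not part of the original source) of how the artefact registry above is
  * exercised: each call to PlusArg(...) or PlusArgArtefacts.append(...) records an entry, and the
  * collected help text and name array can then be emitted for inclusion in the C++ emulator.
  * The object name and the registered plusarg are illustrative only.
  */
private object PlusArgHeaderExample {
  import Doctypes._
  def emit(): String = {
    PlusArgArtefacts.append("example_limit", Some(BigInt(0)), "illustrative limit")
    PlusArgArtefacts.serialize_cHeader()
  }
}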
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
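    // Illustrative note (not in the original source): the wiring below follows the RC -> VA -> SA -> ST
    // pipeline implied by the parameters above: route_computer answers the units' router_req,
    // vc_allocator grants their vcalloc_req, and switch_allocator grants salloc_req and drives the
    // switch's crossbar selects (registered unless combineSAST is set).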
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
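    // Illustrative note (not in the original source): running the simulation with
    // +noc_util_sample_rate=N makes each link below emit a "nocsample" printf roughly every N cycles
    // (once it has fired at least once), reporting the cumulative fire count for that link; the
    // default of 0 disables the printfs entirely.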
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
    * its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
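  // Illustrative note (not in the original source): a concrete subclass typically reassigns
  // childClock/childReset (e.g. from an incoming clock port) and overrides
  // provideImplicitClockToLazyChildren to true when its lazily-instantiated children should be
  // elaborated under that clock/reset domain.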
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module Router_4( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [1:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_3, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_4, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_3, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_4, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_egress_nodes_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_2_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_2_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_0_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _route_computer_io_resp_3_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_2; // @[Router.scala:136:32]
wire _vc_allocator_io_req_4_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_3_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_2_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_2_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_1_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_1_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_3_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_1_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_2_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_2_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_4_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_3_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_1_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34]
wire _switch_io_out_3_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_3_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_3_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_3_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _egress_unit_3_to_5_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_3_to_5_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_3_to_5_io_out_valid; // @[Router.scala:125:13]
wire _output_unit_2_to_8_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_2_to_8_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire _output_unit_1_to_5_io_credit_available_1; // @[Router.scala:122:13]
wire _output_unit_1_to_5_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_1_to_5_io_channel_status_1_occupied; // @[Router.scala:122:13]
wire _output_unit_1_to_5_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_0_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [144:0] _ingress_unit_4_from_11_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_11_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_4_from_11_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_4_from_11_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_4_from_11_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_11_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_11_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_4_from_11_io_in_ready; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_3_from_10_io_router_req_bits_flow_egress_node; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_3_from_10_io_router_req_bits_flow_egress_node_id; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [144:0] _ingress_unit_3_from_10_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_3_from_10_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_3_from_10_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_3_from_10_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_3_from_10_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_3_from_10_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_3_from_10_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_3_from_10_io_in_ready; // @[Router.scala:116:13]
wire [1:0] _input_unit_2_from_8_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_8_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_8_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_8_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_8_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_8_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_2_from_8_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_2_from_8_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_8_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_8_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_8_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_8_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_8_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_8_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_5_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_5_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_5_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_5_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_5_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_5_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_1_from_5_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_1_from_5_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_5_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_5_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_5_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_5_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_5_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_5_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_0_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_0_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_0_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_0_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_0_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_0_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_0_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_0_from_0_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_0_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_0_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [2:0] fires_count = {1'h0, {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_0_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _input_unit_1_from_5_io_vcalloc_req_valid}} + {1'h0, {1'h0, _vc_allocator_io_req_2_ready & _input_unit_2_from_8_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_3_ready & _ingress_unit_3_from_10_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_4_ready & _ingress_unit_4_from_11_io_vcalloc_req_valid}}; // @[Decoupled.scala:51:35]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_2; // @[Router.scala:203:29]
reg fired_2; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_3; // @[Router.scala:203:29]
reg fired_3; // @[Router.scala:204:26]
wire _GEN_4 = _GEN_0 & fired_3; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_4; // @[Router.scala:203:29]
reg fired_4; // @[Router.scala:204:26]
wire _GEN_5 = _GEN_0 & fired_4; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_5; // @[Router.scala:203:29]
reg fired_5; // @[Router.scala:204:26]
wire _GEN_6 = _GEN_0 & fired_5; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
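// Illustrative example (not in the original source): with two traversable virtual channels of
// bufferSize 2 and 3 and useOutputQueues = true, delims = Seq(0, 2, 5), so starts = Seq(0, 2),
// ends = Seq(2, 5) and fullSize = 5; each VC owns the contiguous [start, end) slice of the
// shared buffer. Without output queues each slice gets one extra slot, since head == tail
// is used to mean "empty".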
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
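// fifo_deps (set below when a head flit arrives) records which other VCs currently hold a
// packet of the same FIFO flow; bits are cleared here as those VCs return to g_i, and a VC
// only requests VC allocation once its fifo_deps mask is zero, preserving per-flow ordering.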
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
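// The doubled PriorityEncoderOH over {vals, vals & ~mask}, folded back into nVirtualChannels
// bits, gives a rotating priority: VCs above the most recent grant are tried first, with the
// unmasked vector as the fallback when none of them are requesting.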
// Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
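// A credit is returned for the chosen input VC whenever its flit wins switch allocation; the
// VC itself is only reported free (vc_free) when the departing flit is a tail flit.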
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
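// filterVCSel ties off vc_sel bits for output VCs that the routing relation can never choose
// from this input VC, so those combinations are statically impossible in the generated logic.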
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputBuffer( // @[InputUnit.scala:49:7]
input clock, // @[InputUnit.scala:49:7]
input reset, // @[InputUnit.scala:49:7]
input io_enq_0_valid, // @[InputUnit.scala:51:14]
input io_enq_0_bits_head, // @[InputUnit.scala:51:14]
input io_enq_0_bits_tail, // @[InputUnit.scala:51:14]
input [72:0] io_enq_0_bits_payload, // @[InputUnit.scala:51:14]
input [2:0] io_enq_0_bits_virt_channel_id, // @[InputUnit.scala:51:14]
output io_deq_0_bits_head, // @[InputUnit.scala:51:14]
output io_deq_0_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_0_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_1_ready, // @[InputUnit.scala:51:14]
output io_deq_1_valid, // @[InputUnit.scala:51:14]
output io_deq_1_bits_head, // @[InputUnit.scala:51:14]
output io_deq_1_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_1_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_2_ready, // @[InputUnit.scala:51:14]
output io_deq_2_valid, // @[InputUnit.scala:51:14]
output io_deq_2_bits_head, // @[InputUnit.scala:51:14]
output io_deq_2_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_2_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_3_ready, // @[InputUnit.scala:51:14]
output io_deq_3_valid, // @[InputUnit.scala:51:14]
output io_deq_3_bits_head, // @[InputUnit.scala:51:14]
output io_deq_3_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_3_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_4_ready, // @[InputUnit.scala:51:14]
output io_deq_4_valid, // @[InputUnit.scala:51:14]
output io_deq_4_bits_head, // @[InputUnit.scala:51:14]
output io_deq_4_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_4_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_5_ready, // @[InputUnit.scala:51:14]
output io_deq_5_valid, // @[InputUnit.scala:51:14]
output io_deq_5_bits_head, // @[InputUnit.scala:51:14]
output io_deq_5_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_5_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_6_ready, // @[InputUnit.scala:51:14]
output io_deq_6_valid, // @[InputUnit.scala:51:14]
output io_deq_6_bits_head, // @[InputUnit.scala:51:14]
output io_deq_6_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_6_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_7_ready, // @[InputUnit.scala:51:14]
output io_deq_7_valid, // @[InputUnit.scala:51:14]
output io_deq_7_bits_head, // @[InputUnit.scala:51:14]
output io_deq_7_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_7_bits_payload // @[InputUnit.scala:51:14]
);
wire _qs_7_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_6_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_5_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_4_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_3_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_2_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_1_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_0_io_enq_ready; // @[InputUnit.scala:90:49]
wire [74:0] _mem_ext_R0_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R1_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R2_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R3_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R4_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R5_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R6_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R7_data; // @[InputUnit.scala:85:18]
reg [4:0] heads_0; // @[InputUnit.scala:86:24]
reg [4:0] heads_1; // @[InputUnit.scala:86:24]
reg [4:0] heads_2; // @[InputUnit.scala:86:24]
reg [4:0] heads_3; // @[InputUnit.scala:86:24]
reg [4:0] heads_4; // @[InputUnit.scala:86:24]
reg [4:0] heads_5; // @[InputUnit.scala:86:24]
reg [4:0] heads_6; // @[InputUnit.scala:86:24]
reg [4:0] heads_7; // @[InputUnit.scala:86:24]
reg [4:0] tails_0; // @[InputUnit.scala:87:24]
reg [4:0] tails_1; // @[InputUnit.scala:87:24]
reg [4:0] tails_2; // @[InputUnit.scala:87:24]
reg [4:0] tails_3; // @[InputUnit.scala:87:24]
reg [4:0] tails_4; // @[InputUnit.scala:87:24]
reg [4:0] tails_5; // @[InputUnit.scala:87:24]
reg [4:0] tails_6; // @[InputUnit.scala:87:24]
reg [4:0] tails_7; // @[InputUnit.scala:87:24]
wire _tails_T_24 = io_enq_0_bits_virt_channel_id == 3'h0; // @[Mux.scala:32:36]
wire _tails_T_25 = io_enq_0_bits_virt_channel_id == 3'h1; // @[Mux.scala:32:36]
wire _tails_T_26 = io_enq_0_bits_virt_channel_id == 3'h2; // @[Mux.scala:32:36]
wire _tails_T_27 = io_enq_0_bits_virt_channel_id == 3'h3; // @[Mux.scala:32:36]
wire _tails_T_28 = io_enq_0_bits_virt_channel_id == 3'h4; // @[Mux.scala:32:36]
wire _tails_T_29 = io_enq_0_bits_virt_channel_id == 3'h5; // @[Mux.scala:32:36]
wire _tails_T_30 = io_enq_0_bits_virt_channel_id == 3'h6; // @[Mux.scala:32:36]
wire direct_to_q = (_tails_T_24 & _qs_0_io_enq_ready | _tails_T_25 & _qs_1_io_enq_ready | _tails_T_26 & _qs_2_io_enq_ready | _tails_T_27 & _qs_3_io_enq_ready | _tails_T_28 & _qs_4_io_enq_ready | _tails_T_29 & _qs_5_io_enq_ready | _tails_T_30 & _qs_6_io_enq_ready | (&io_enq_0_bits_virt_channel_id) & _qs_7_io_enq_ready) & (_tails_T_24 & heads_0 == tails_0 | _tails_T_25 & heads_1 == tails_1 | _tails_T_26 & heads_2 == tails_2 | _tails_T_27 & heads_3 == tails_3 | _tails_T_28 & heads_4 == tails_4 | _tails_T_29 & heads_5 == tails_5 | _tails_T_30 & heads_6 == tails_6 | (&io_enq_0_bits_virt_channel_id) & heads_7 == tails_7); // @[Mux.scala:30:73, :32:36]
wire mem_MPORT_en = io_enq_0_valid & ~direct_to_q; // @[InputUnit.scala:96:62, :100:{27,30}]
wire [7:0][4:0] _GEN = {{tails_7}, {tails_6}, {tails_5}, {tails_4}, {tails_3}, {tails_2}, {tails_1}, {tails_0}}; // @[InputUnit.scala:87:24, :102:16]
wire _GEN_0 = io_enq_0_bits_virt_channel_id == 3'h0; // @[InputUnit.scala:103:45]
wire _GEN_1 = io_enq_0_bits_virt_channel_id == 3'h1; // @[InputUnit.scala:103:45]
wire _GEN_2 = io_enq_0_bits_virt_channel_id == 3'h2; // @[InputUnit.scala:103:45]
wire _GEN_3 = io_enq_0_bits_virt_channel_id == 3'h3; // @[InputUnit.scala:103:45]
wire _GEN_4 = io_enq_0_bits_virt_channel_id == 3'h4; // @[InputUnit.scala:103:45]
wire _GEN_5 = io_enq_0_bits_virt_channel_id == 3'h5; // @[InputUnit.scala:103:45]
wire _GEN_6 = io_enq_0_bits_virt_channel_id == 3'h6; // @[InputUnit.scala:103:45]
wire _GEN_7 = io_enq_0_valid & direct_to_q; // @[InputUnit.scala:96:62, :107:34]
wire can_to_q_0 = heads_0 != tails_0 & _qs_0_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_1 = heads_1 != tails_1 & _qs_1_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_2 = heads_2 != tails_2 & _qs_2_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_3 = heads_3 != tails_3 & _qs_3_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_4 = heads_4 != tails_4 & _qs_4_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_5 = heads_5 != tails_5 & _qs_5_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_6 = heads_6 != tails_6 & _qs_6_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_7 = heads_7 != tails_7 & _qs_7_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire [7:0] to_q_oh_enc = can_to_q_0 ? 8'h1 : can_to_q_1 ? 8'h2 : can_to_q_2 ? 8'h4 : can_to_q_3 ? 8'h8 : can_to_q_4 ? 8'h10 : can_to_q_5 ? 8'h20 : can_to_q_6 ? 8'h40 : {can_to_q_7, 7'h0}; // @[OneHot.scala:58:35]
wire _GEN_8 = can_to_q_0 | can_to_q_1 | can_to_q_2 | can_to_q_3 | can_to_q_4 | can_to_q_5 | can_to_q_6 | can_to_q_7; // @[package.scala:81:59]
wire [4:0] head = (to_q_oh_enc[0] ? heads_0 : 5'h0) | (to_q_oh_enc[1] ? heads_1 : 5'h0) | (to_q_oh_enc[2] ? heads_2 : 5'h0) | (to_q_oh_enc[3] ? heads_3 : 5'h0) | (to_q_oh_enc[4] ? heads_4 : 5'h0) | (to_q_oh_enc[5] ? heads_5 : 5'h0) | (to_q_oh_enc[6] ? heads_6 : 5'h0) | (to_q_oh_enc[7] ? heads_7 : 5'h0); // @[OneHot.scala:83:30]
wire _GEN_9 = _GEN_8 & to_q_oh_enc[0]; // @[OneHot.scala:83:30]
wire _GEN_10 = _GEN_8 & to_q_oh_enc[1]; // @[OneHot.scala:83:30]
wire _GEN_11 = _GEN_8 & to_q_oh_enc[2]; // @[OneHot.scala:83:30]
wire _GEN_12 = _GEN_8 & to_q_oh_enc[3]; // @[OneHot.scala:83:30]
wire _GEN_13 = _GEN_8 & to_q_oh_enc[4]; // @[OneHot.scala:83:30]
wire _GEN_14 = _GEN_8 & to_q_oh_enc[5]; // @[OneHot.scala:83:30]
wire _GEN_15 = _GEN_8 & to_q_oh_enc[6]; // @[OneHot.scala:83:30]
wire _GEN_16 = _GEN_8 & to_q_oh_enc[7]; // @[OneHot.scala:83:30]
wire [4:0] _tails_T_49 = _GEN[io_enq_0_bits_virt_channel_id] == ({1'h0, {1'h0, {1'h0, {2{_tails_T_25}}} | {3{_tails_T_26}}} | (_tails_T_27 ? 4'hB : 4'h0) | {4{_tails_T_28}}} | (_tails_T_29 ? 5'h13 : 5'h0) | (_tails_T_30 ? 5'h17 : 5'h0) | ((&io_enq_0_bits_virt_channel_id) ? 5'h1B : 5'h0)) ? {_tails_T_29, {_tails_T_27, _tails_T_26, 2'h0} | (_tails_T_28 ? 4'hC : 4'h0)} | (_tails_T_30 ? 5'h14 : 5'h0) | ((&io_enq_0_bits_virt_channel_id) ? 5'h18 : 5'h0) : _GEN[io_enq_0_bits_virt_channel_id] + 5'h1; // @[Mux.scala:30:73, :32:36]
wire [2:0] _to_q_T_2 = to_q_oh_enc[7:5] | to_q_oh_enc[3:1]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _to_q_T_4 = _to_q_T_2[2] | _to_q_T_2[0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [2:0] to_q = {|(to_q_oh_enc[7:4]), |(_to_q_T_2[2:1]), _to_q_T_4}; // @[OneHot.scala:30:18, :32:{10,14,28}]
wire [4:0] _heads_T_33 = head == ({1'h0, {1'h0, {1'h0, {2{to_q_oh_enc[1]}}} | {3{to_q_oh_enc[2]}}} | (to_q_oh_enc[3] ? 4'hB : 4'h0) | {4{to_q_oh_enc[4]}}} | (to_q_oh_enc[5] ? 5'h13 : 5'h0) | (to_q_oh_enc[6] ? 5'h17 : 5'h0) | (to_q_oh_enc[7] ? 5'h1B : 5'h0)) ? {to_q_oh_enc[5], {to_q_oh_enc[3:2], 2'h0} | (to_q_oh_enc[4] ? 4'hC : 4'h0)} | (to_q_oh_enc[6] ? 5'h14 : 5'h0) | (to_q_oh_enc[7] ? 5'h18 : 5'h0) : head + 5'h1; // @[OneHot.scala:83:30]
always @(posedge clock) begin // @[InputUnit.scala:49:7]
if (reset) begin // @[InputUnit.scala:49:7]
heads_0 <= 5'h0; // @[InputUnit.scala:86:24]
heads_1 <= 5'h0; // @[InputUnit.scala:86:24]
heads_2 <= 5'h4; // @[InputUnit.scala:86:24]
heads_3 <= 5'h8; // @[InputUnit.scala:86:24]
heads_4 <= 5'hC; // @[InputUnit.scala:86:24]
heads_5 <= 5'h10; // @[InputUnit.scala:86:24]
heads_6 <= 5'h14; // @[InputUnit.scala:86:24]
heads_7 <= 5'h18; // @[InputUnit.scala:86:24]
tails_0 <= 5'h0; // @[InputUnit.scala:87:24]
tails_1 <= 5'h0; // @[InputUnit.scala:87:24]
tails_2 <= 5'h4; // @[InputUnit.scala:87:24]
tails_3 <= 5'h8; // @[InputUnit.scala:87:24]
tails_4 <= 5'hC; // @[InputUnit.scala:87:24]
tails_5 <= 5'h10; // @[InputUnit.scala:87:24]
tails_6 <= 5'h14; // @[InputUnit.scala:87:24]
tails_7 <= 5'h18; // @[InputUnit.scala:87:24]
end
else begin // @[InputUnit.scala:49:7]
if (_GEN_8 & {to_q_oh_enc[7:4], |(_to_q_T_2[2:1]), _to_q_T_4} == 6'h0) // @[OneHot.scala:30:18, :32:{10,14,28}]
heads_0 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & to_q == 3'h1) // @[OneHot.scala:32:10]
heads_1 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & to_q == 3'h2) // @[OneHot.scala:32:10]
heads_2 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & to_q == 3'h3) // @[OneHot.scala:32:10]
heads_3 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & to_q == 3'h4) // @[OneHot.scala:32:10]
heads_4 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & to_q == 3'h5) // @[OneHot.scala:32:10]
heads_5 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & to_q == 3'h6) // @[OneHot.scala:32:10]
heads_6 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_8 & (&to_q)) // @[OneHot.scala:32:10]
heads_7 <= _heads_T_33; // @[InputUnit.scala:86:24, :122:27]
if (mem_MPORT_en & _GEN_0) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_0 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_1) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_1 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_2) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_2 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_3) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_3 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_4) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_4 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_5) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_5 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_6) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_6 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & (&io_enq_0_bits_virt_channel_id)) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_7 <= _tails_T_49; // @[InputUnit.scala:87:24, :103:51]
end
always @(posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
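// Minimal usage sketch (illustrative only, not part of this file): an asynchronous interrupt
// crossing typically registers the sources in the sending clock domain and synchronizes them
// in the receiving one, e.g.
//
// val source = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false)) // sending domain
// val sink = LazyModule(new IntSyncAsyncCrossingSink(sync = 3)) // receiving domain
// sink.node := source.node := deviceIntNode // deviceIntNode: hypothetical interrupt source node
// consumerIntNode := sink.node // consumerIntNode: hypothetical sink-side consumer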
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
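// Ordering is lexicographic on (serial, index): for example HalfEdge(1, 7) < HalfEdge(2, 0),
// and edges with the same serial are ordered by their port index (illustrative values).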
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
* `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
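// Illustrative example (not from the original source): with oBindings of kinds
// [BIND_ONCE, BIND_STAR] and a resolved oStar of 2, oSum = Seq(1, 2).scanLeft(0)(_ + _) =
// Seq(0, 1, 3), so oPortMapping = Seq((0, 1), (1, 3)): the first binding covers edge 0 and
// the star binding covers edges 1 and 2.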
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility;
    // in the future, add an option in LazyModule to decide whether unconnected signals are allowed.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
  /** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
  /** Create the [[Dangle]]s which describe the connections to this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation. Should only be used within [[LazyModuleImp]] code
    * or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation. Should only be used within [[LazyModuleImp]] code
    * or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
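  // Usage sketch (illustrative, not part of the original file): inside the parent
  // LazyModule's implementation the negotiated hardware is reached through these
  // accessors, e.g.
  //   lazy val module = new LazyModuleImp(this) {
  //     val (bundleOut, edgeOut) = node.out.head  // first outward port
  //     val (bundleIn,  edgeIn)  = node.in.head   // first inward port
  //   }
  // where `node` is any diplomatic node owned by that LazyModule.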
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
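  // Illustrative note: `bind` records complementary binding types on the two ends of
  // a connection. A BIND_STAR pushed onto this node's inward list is recorded as
  // BIND_QUERY on the other node's outward list (and vice versa), so each side can
  // later ask the other how many edges its star should expand to.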
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
  /** Metadata for printing the node graph. */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File AsyncResetReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
/** This black-boxes an Async Reset (or Set) Register.
 *
 * Because Chisel doesn't support parameterized black boxes,
 * we unfortunately have to instantiate a number of these.
 *
 * We also have to hard-code the set/reset behavior.
 *
 * Do not confuse an asynchronous reset signal with an asynchronously reset reg.
 * You should still properly synchronize your reset deassertion.
 *
 * @param d Data input
 * @param q Data Output
 * @param clk Clock Input
 * @param rst Reset Input
 * @param en Write Enable Input
 */
class AsyncResetReg(resetValue: Int = 0) extends RawModule {
val io = IO(new Bundle {
val d = Input(Bool())
val q = Output(Bool())
val en = Input(Bool())
val clk = Input(Clock())
val rst = Input(Reset())
})
val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
class SimpleRegIO(val w: Int) extends Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
}
class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module {
override def desiredName = s"AsyncResetRegVec_w${w}_i${init}"
val io = IO(new SimpleRegIO(w))
val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W)))
when (io.en) {
reg := io.d
}
io.q := reg
}
object AsyncResetReg {
// Create Single Registers
def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = {
val reg = Module(new AsyncResetReg(if (init) 1 else 0))
reg.io.d := d
reg.io.clk := clk
reg.io.rst := rst
reg.io.en := true.B
name.foreach(reg.suggestName(_))
reg.io.q
}
def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None)
def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name))
// Create Vectors of Registers
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = {
val w = updateData.getWidth max resetData.bitLength
val reg = Module(new AsyncResetRegVec(w, resetData))
name.foreach(reg.suggestName(_))
reg.io.d := updateData
reg.io.en := enable
reg.io.q
}
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData,
resetData, enable, Some(name))
def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B)
def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name))
def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable)
def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name))
def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B)
def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name))
}
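// Usage sketch (illustrative, not part of the original file): a small wrapper that
// holds a 4-bit control value with an asynchronous reset value of 5. The wrapper
// and its port names are hypothetical.
class AsyncResetRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(4.W))
    val en = Input(Bool())
    val q  = Output(UInt(4.W))
  })
  // resetData = 5 gives an asynchronous reset value of 4'b0101
  io.q := AsyncResetReg(io.d, BigInt(5), io.en, "ctrl_reg")
}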
| module IntSyncCrossingSource_n1x1_11( // @[Crossing.scala:41:9]
input clock, // @[Crossing.scala:41:9]
input reset // @[Crossing.scala:41:9]
);
wire auto_in_0 = 1'h0; // @[Crossing.scala:41:9]
wire auto_out_sync_0 = 1'h0; // @[Crossing.scala:41:9]
wire nodeIn_0 = 1'h0; // @[MixedNode.scala:551:17]
wire nodeOut_sync_0 = 1'h0; // @[MixedNode.scala:542:17]
AsyncResetRegVec_w1_i0_11 reg_0 ( // @[AsyncResetReg.scala:86:21]
.clock (clock),
.reset (reset)
); // @[AsyncResetReg.scala:86:21]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister, but allows the user to suggest a name
// for the registers that get instantiated, and to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
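// Usage sketch (illustrative, not part of the original file): delay a strobe by two
// cycles through named, zero-initialized registers. The wrapper and names here are
// hypothetical.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Two RegNext stages, each resetting to false, named strobe_pipe_0/strobe_pipe_1.
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("strobe_pipe"))
}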
/** These wrap behavioral shift registers into specific modules to allow
 * backend flows to replace or constrain them properly when used for CDC
 * synchronization, rather than buffering.
 *
 * The different types vary in their reset behavior:
 * AsyncResetShiftReg -- Asynchronously reset register array.
 * A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
 * but only used for timing applications.
 */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
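// Usage sketch (illustrative, not part of the original file): a two-deep,
// asynchronously reset timing pipeline on an 8-bit bus; the wrapper is hypothetical.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := AsyncResetShiftReg(io.d, 2, 0, Some("timing_pipe"))
}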
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral shift and next registers into specific modules to allow
 * backend flows to replace or constrain them properly when used for CDC
 * synchronization, rather than buffering.
 *
 * These are built up of *ResetSynchronizerPrimitiveShiftReg,
 * intended to be replaced by the integrator's metastable flop chains or replaced
 * at this level if they have a multi-bit-wide synchronizer primitive.
 * The different types vary in their reset behavior:
 * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin.
 * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of
 * D-deep 1-bit-wide shift registers.
 * SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to
 * AsyncResetSynchronizerShiftReg.
 * [Inferred]ResetSynchronizerShiftReg -- Reset type determined by chisel3 reset inference.
 *
 * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
 * clock domains.
 */
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
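// Usage sketch (illustrative, not part of the original file): the common CDC idiom
// is to pass a single asynchronous level through a 3-deep synchronizer that resets
// to 0; the wrapper below is hypothetical.
class LevelSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, 0, Some("level_sync"))
}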
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
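// Usage sketch (illustrative, not part of the original file): capture a 16-bit bus
// in the destination clock domain on an enable pulse; doInit = false leaves the
// register without a reset value. The wrapper is hypothetical.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(16.W))
    val en = Input(Bool())
    val q  = Output(UInt(16.W))
  })
  io.q := ClockCrossingReg(io.d, io.en, doInit = false, name = Some("cdc_capture"))
}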
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_154( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_270 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
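// Usage sketch (illustrative, not part of the original file): rounding a raw
// single-precision result into the 33-bit recoded format (expWidth 8, sigWidth 24).
// The wrapper and its port names are hypothetical; options = 0 requests the default
// rounding behavior.
class RoundToRecF32Example extends RawModule {
  val io = IO(new Bundle {
    val in  = Input(new RawFloat(8, 26)) // sigWidth + 2 round/sticky bits
    val out = Output(Bits(33.W))         // expWidth + sigWidth + 1
  })
  val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even
  rounder.io.detectTininess := tininess_afterRounding
  io.out := rounder.io.out
}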
| module RoundAnyRawFNToRecFN_ie2_is1_oe8_os24_4(); // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [26:0] adjustedSig = 27'h2000000; // @[RoundAnyRawFNToRecFN.scala:114:22]
wire [22:0] _common_fractOut_T = 23'h400000; // @[RoundAnyRawFNToRecFN.scala:139:28]
wire [8:0] _expOut_T_2 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_12 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:253:14, :257:14, :261:14, :265:14]
wire [8:0] _expOut_T_1 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_11 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_18 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _expOut_T_20 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:18, :257:18, :261:18, :265:18, :269:16, :273:16, :277:16, :278:16]
wire [8:0] _sAdjustedExp_T_1 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] common_expOut = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _common_expOut_T = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _common_expOut_T_2 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_3 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_7 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_10 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_13 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_15 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_17 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] _expOut_T_19 = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [8:0] expOut = 9'h100; // @[RoundAnyRawFNToRecFN.scala:106:14, :122:31, :136:{38,55}, :252:24, :256:17, :260:17, :264:17, :268:18, :272:15, :276:15, :277:73]
wire [22:0] common_fractOut = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _common_fractOut_T_1 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _common_fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _fractOut_T_2 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _fractOut_T_3 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [22:0] fractOut = 23'h0; // @[RoundAnyRawFNToRecFN.scala:123:31, :138:16, :140:28, :280:12, :281:16, :283:11, :284:13]
wire [9:0] _sAdjustedExp_T = 10'h100; // @[RoundAnyRawFNToRecFN.scala:104:25, :136:55, :286:23]
wire [9:0] sAdjustedExp = 10'h100; // @[RoundAnyRawFNToRecFN.scala:106:31, :136:55, :286:23]
wire [9:0] _common_expOut_T_1 = 10'h100; // @[RoundAnyRawFNToRecFN.scala:136:55, :286:23]
wire [9:0] _io_out_T = 10'h100; // @[RoundAnyRawFNToRecFN.scala:136:55, :286:23]
wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23]
wire [3:0] _io_exceptionFlags_T_2 = 4'h0; // @[RoundAnyRawFNToRecFN.scala:288:53]
wire [4:0] io_exceptionFlags = 5'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:66]
wire [4:0] _io_exceptionFlags_T_3 = 5'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:66]
wire [32:0] io_out = 33'h80000000; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :286:33]
wire [32:0] _io_out_T_1 = 33'h80000000; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :286:33]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _roundMagUp_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _commonCase_T_3 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire commonCase = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :90:53, :98:66, :237:{22,33,36,61,64}, :243:{32,60}]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41]
wire [2:0] _io_exceptionFlags_T_1 = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16, :288:41]
wire [1:0] io_in_sig = 2'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16]
wire [3:0] io_in_sExp = 4'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :58:16]
wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire common_overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire common_inexact = 1'h0; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34]
wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49]
wire overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:238:32]
wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32]
wire _inexact_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:240:43]
wire inexact = 1'h0; // @[RoundAnyRawFNToRecFN.scala:240:28]
wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire _notNaN_isInfOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:45]
wire notNaN_isInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:32]
wire signOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:250:22]
wire _expOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:253:32]
wire _fractOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:280:22]
wire _fractOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:280:38]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
  /** Similar to Seq.groupBy except this returns a Seq instead of a Map.
   * Useful for deterministic code generation.
   */
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
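// Usage sketch (illustrative, not part of the original file): with
// `import freechips.rocketchip.util._` in scope, the implicit classes above extend
// plain Chisel types. Signal names below are hypothetical:
//   val hit     = opcode.isOneOf(0.U, 3.U)   // UIntIsOneOf
//   val filled  = leftOR(oneHotMask)          // propagate each set bit toward the MSB
//   val rotated = laneSeq.rotate(2)           // static rotation of a Seq of hardware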
File SingleVCAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.random.{LFSR}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{ChannelRoutingInfo, FlowRoutingBundle}
// Allocates 1 VC per cycle
abstract class SingleVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends VCAllocator(vP)(p) {
// get single input
val mask = RegInit(0.U(allInParams.size.W))
val in_arb_reqs = Wire(Vec(allInParams.size, MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })))
val in_arb_vals = Wire(Vec(allInParams.size, Bool()))
val in_arb_filter = PriorityEncoderOH(Cat(in_arb_vals.asUInt, in_arb_vals.asUInt & ~mask))
val in_arb_sel = (in_arb_filter(allInParams.size-1,0) | (in_arb_filter >> allInParams.size))
when (in_arb_vals.orR) {
mask := Mux1H(in_arb_sel, (0 until allInParams.size).map { w => ~(0.U((w+1).W)) })
}
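  // Worked example (illustrative): with three inputs and mask = 001 (input 0 was
  // granted last), requests in_arb_vals = 101 give
  //   Cat(vals, vals & ~mask) = 101_100, PriorityEncoderOH -> 000_100,
  // so in_arb_sel = 100: input 2 wins and mask becomes 111, which rotates priority
  // back to the lowest-numbered requester on the next allocation.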
for (i <- 0 until allInParams.size) {
(0 until allOutParams.size).map { m =>
(0 until allOutParams(m).nVirtualChannels).map { n =>
in_arb_reqs(i)(m)(n) := io.req(i).bits.vc_sel(m)(n) && !io.channel_status(m)(n).occupied
}
}
in_arb_vals(i) := io.req(i).valid && in_arb_reqs(i).map(_.orR).toSeq.orR
}
// Input arbitration
io.req.foreach(_.ready := false.B)
val in_alloc = Wire(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val in_flow = Mux1H(in_arb_sel, io.req.map(_.bits.flow).toSeq)
val in_vc = Mux1H(in_arb_sel, io.req.map(_.bits.in_vc).toSeq)
val in_vc_sel = Mux1H(in_arb_sel, in_arb_reqs)
in_alloc := Mux(in_arb_vals.orR,
inputAllocPolicy(in_flow, in_vc_sel, OHToUInt(in_arb_sel), in_vc, io.req.map(_.fire).toSeq.orR),
0.U.asTypeOf(in_alloc))
  // send allocation to input units
for (i <- 0 until allInParams.size) {
io.req(i).ready := in_arb_sel(i)
for (m <- 0 until allOutParams.size) {
(0 until allOutParams(m).nVirtualChannels).map { n =>
io.resp(i).vc_sel(m)(n) := in_alloc(m)(n)
}
}
assert(PopCount(io.resp(i).vc_sel.asUInt) <= 1.U)
}
// send allocation to output units
for (i <- 0 until allOutParams.size) {
(0 until allOutParams(i).nVirtualChannels).map { j =>
io.out_allocs(i)(j).alloc := in_alloc(i)(j)
io.out_allocs(i)(j).flow := in_flow
}
}
}
File VCAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.{DecodeLogic}
import constellation.channel._
import constellation.noc.{HasNoCParams}
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo, ChannelRoutingInfo}
class VCAllocReq(
val inParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])
(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams
with HasNoCParams {
val flow = new FlowRoutingBundle
val in_vc = UInt(log2Ceil(inParam.nVirtualChannels).W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class VCAllocResp(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
case class VCAllocatorParams(
routerParams: RouterParams,
inParams: Seq[ChannelParams],
outParams: Seq[ChannelParams],
ingressParams: Seq[IngressChannelParams],
egressParams: Seq[EgressChannelParams])
abstract class VCAllocator(val vP: VCAllocatorParams)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val routerParams = vP.routerParams
val inParams = vP.inParams
val outParams = vP.outParams
val ingressParams = vP.ingressParams
val egressParams = vP.egressParams
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u =>
Flipped(Decoupled(new VCAllocReq(u, outParams, egressParams)))
})
val resp = MixedVec(allInParams.map { u =>
Output(new VCAllocResp(outParams, egressParams))
})
val channel_status = MixedVec(allOutParams.map { u =>
Vec(u.nVirtualChannels, Input(new OutputChannelStatus)) })
val out_allocs = MixedVec(allOutParams.map { u =>
Vec(u.nVirtualChannels, Output(new OutputChannelAlloc)) })
})
val nOutChannels = allOutParams.map(_.nVirtualChannels).sum
def inputAllocPolicy(
flow: FlowRoutingBundle, vc_sel: MixedVec[Vec[Bool]],
inId: UInt, inVId: UInt, fire: Bool): MixedVec[Vec[Bool]]
def outputAllocPolicy(
out: ChannelRoutingInfo,
flows: Seq[FlowRoutingBundle], reqs: Seq[Bool], fire: Bool): Vec[Bool]
}
File ISLIP.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.random.{LFSR}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{ChannelRoutingInfo, FlowRoutingBundle}
trait ISLIP { this: VCAllocator =>
def islip(in: UInt, fire: Bool): UInt = {
val w = in.getWidth
if (w > 1) {
val mask = RegInit(0.U(w.W))
val full = Cat(in, in & ~mask)
val oh = PriorityEncoderOH(full)
val sel = (oh(w-1,0) | (oh >> w))
when (fire) {
mask := MuxCase(0.U, (0 until w).map { i =>
sel(i) -> ~(0.U((i+1).W))
})
}
sel
} else {
in
}
}
def inputAllocPolicy(flow: FlowRoutingBundle, vc_sel: MixedVec[Vec[Bool]], inId: UInt, inVId: UInt, fire: Bool) = {
islip(vc_sel.asUInt, fire).asTypeOf(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool())}))
}
def outputAllocPolicy(channel: ChannelRoutingInfo, flows: Seq[FlowRoutingBundle], reqs: Seq[Bool], fire: Bool) = {
islip(VecInit(reqs).asUInt, fire).asTypeOf(Vec(allInParams.size, Bool()))
}
}
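// Worked example (illustrative): `islip` applies the same rotating-priority grant
// over the flattened request vector. With w = 4, mask = 0011 and in = 1010, the
// doubled vector is 1010_1000, the priority encoder picks bit 3, so sel = 1000;
// once the grant fires, mask becomes 1111 and the next grant starts again from the
// lowest-numbered requesting index.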
class ISLIPMultiVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends MultiVCAllocator(vP)(p)
with ISLIP
class RotatingSingleVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends SingleVCAllocator(vP)(p)
with ISLIP
| module RotatingSingleVCAllocator_15( // @[ISLIP.scala:43:7]
input clock, // @[ISLIP.scala:43:7]
input reset, // @[ISLIP.scala:43:7]
output io_req_1_ready, // @[VCAllocator.scala:49:14]
input io_req_1_valid, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_2, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_3, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_8, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_9, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_10, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_11, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_12, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_13, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_14, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_15, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_16, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_17, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_18, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_19, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_20, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_21, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_2, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_3, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_8, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_9, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_10, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_11, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_12, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_13, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_14, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_15, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_16, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_17, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_18, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_19, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_20, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_req_0_ready, // @[VCAllocator.scala:49:14]
input io_req_0_valid, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_3, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_9, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_10, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_11, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_12, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_13, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_14, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_15, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_16, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_17, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_18, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_19, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_20, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_21, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_2, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_3, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_8, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_9, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_10, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_11, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_12, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_13, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_14, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_15, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_16, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_17, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_18, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_19, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_20, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_2, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_3, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_8, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_9, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_10, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_11, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_12, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_13, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_14, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_15, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_16, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_17, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_18, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_19, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_20, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_21, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_2, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_3, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_8, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_9, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_10, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_11, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_12, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_13, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_14, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_15, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_16, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_17, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_18, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_19, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_20, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_3, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_9, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_10, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_11, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_12, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_13, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_14, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_15, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_16, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_17, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_18, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_19, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_20, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_21, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_2, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_3, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_8, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_9, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_10, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_11, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_12, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_13, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_14, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_15, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_16, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_17, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_18, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_19, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_20, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_0_21, // @[VCAllocator.scala:49:14]
input io_channel_status_1_2_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_3_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_8_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_9_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_10_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_11_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_12_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_13_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_14_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_15_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_16_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_17_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_18_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_19_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_20_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_21_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_2_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_3_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_8_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_9_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_10_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_11_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_12_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_13_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_14_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_15_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_16_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_17_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_18_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_19_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_20_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_21_occupied, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_2_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_3_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_8_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_9_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_10_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_11_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_12_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_13_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_14_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_15_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_16_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_17_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_18_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_19_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_20_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_21_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_2_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_3_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_8_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_9_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_10_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_11_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_12_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_13_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_14_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_15_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_16_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_17_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_18_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_19_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_20_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_21_alloc // @[VCAllocator.scala:49:14]
);
wire in_arb_vals_1; // @[SingleVCAllocator.scala:32:39]
wire in_arb_vals_0; // @[SingleVCAllocator.scala:32:39]
reg [1:0] mask; // @[SingleVCAllocator.scala:16:21]
wire [1:0] _in_arb_filter_T_3 = {in_arb_vals_1, in_arb_vals_0} & ~mask; // @[SingleVCAllocator.scala:16:21, :19:{57,84,86}, :32:39]
wire [3:0] in_arb_filter = _in_arb_filter_T_3[0] ? 4'h1 : _in_arb_filter_T_3[1] ? 4'h2 : in_arb_vals_0 ? 4'h4 : {in_arb_vals_1, 3'h0}; // @[OneHot.scala:85:71]
wire [1:0] in_arb_sel = in_arb_filter[1:0] | in_arb_filter[3:2]; // @[Mux.scala:50:70]
wire _GEN = in_arb_vals_0 | in_arb_vals_1; // @[package.scala:81:59]
wire in_arb_reqs_0_0_2 = io_req_0_bits_vc_sel_0_2 & ~io_channel_status_0_2_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_3 = io_req_0_bits_vc_sel_0_3 & ~io_channel_status_0_3_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_8 = io_req_0_bits_vc_sel_0_8 & ~io_channel_status_0_8_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_9 = io_req_0_bits_vc_sel_0_9 & ~io_channel_status_0_9_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_10 = io_req_0_bits_vc_sel_0_10 & ~io_channel_status_0_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_11 = io_req_0_bits_vc_sel_0_11 & ~io_channel_status_0_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_12 = io_req_0_bits_vc_sel_0_12 & ~io_channel_status_0_12_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_13 = io_req_0_bits_vc_sel_0_13 & ~io_channel_status_0_13_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_14 = io_req_0_bits_vc_sel_0_14 & ~io_channel_status_0_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_15 = io_req_0_bits_vc_sel_0_15 & ~io_channel_status_0_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_16 = io_req_0_bits_vc_sel_0_16 & ~io_channel_status_0_16_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_17 = io_req_0_bits_vc_sel_0_17 & ~io_channel_status_0_17_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_18 = io_req_0_bits_vc_sel_0_18 & ~io_channel_status_0_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_19 = io_req_0_bits_vc_sel_0_19 & ~io_channel_status_0_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_20 = io_req_0_bits_vc_sel_0_20 & ~io_channel_status_0_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_0_21 = io_req_0_bits_vc_sel_0_21 & ~io_channel_status_0_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_3 = io_req_0_bits_vc_sel_1_3 & ~io_channel_status_1_3_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_9 = io_req_0_bits_vc_sel_1_9 & ~io_channel_status_1_9_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_10 = io_req_0_bits_vc_sel_1_10 & ~io_channel_status_1_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_11 = io_req_0_bits_vc_sel_1_11 & ~io_channel_status_1_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_12 = io_req_0_bits_vc_sel_1_12 & ~io_channel_status_1_12_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_13 = io_req_0_bits_vc_sel_1_13 & ~io_channel_status_1_13_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_14 = io_req_0_bits_vc_sel_1_14 & ~io_channel_status_1_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_15 = io_req_0_bits_vc_sel_1_15 & ~io_channel_status_1_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_16 = io_req_0_bits_vc_sel_1_16 & ~io_channel_status_1_16_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_17 = io_req_0_bits_vc_sel_1_17 & ~io_channel_status_1_17_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_18 = io_req_0_bits_vc_sel_1_18 & ~io_channel_status_1_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_19 = io_req_0_bits_vc_sel_1_19 & ~io_channel_status_1_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_20 = io_req_0_bits_vc_sel_1_20 & ~io_channel_status_1_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_21 = io_req_0_bits_vc_sel_1_21 & ~io_channel_status_1_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_0 = io_req_0_valid & (in_arb_reqs_0_0_2 | in_arb_reqs_0_0_3 | in_arb_reqs_0_0_8 | in_arb_reqs_0_0_9 | in_arb_reqs_0_0_10 | in_arb_reqs_0_0_11 | in_arb_reqs_0_0_12 | in_arb_reqs_0_0_13 | in_arb_reqs_0_0_14 | in_arb_reqs_0_0_15 | in_arb_reqs_0_0_16 | in_arb_reqs_0_0_17 | in_arb_reqs_0_0_18 | in_arb_reqs_0_0_19 | in_arb_reqs_0_0_20 | in_arb_reqs_0_0_21 | in_arb_reqs_0_1_3 | in_arb_reqs_0_1_9 | in_arb_reqs_0_1_10 | in_arb_reqs_0_1_11 | in_arb_reqs_0_1_12 | in_arb_reqs_0_1_13 | in_arb_reqs_0_1_14 | in_arb_reqs_0_1_15 | in_arb_reqs_0_1_16 | in_arb_reqs_0_1_17 | in_arb_reqs_0_1_18 | in_arb_reqs_0_1_19 | in_arb_reqs_0_1_20 | in_arb_reqs_0_1_21); // @[package.scala:81:59]
wire in_arb_reqs_1_0_2 = io_req_1_bits_vc_sel_0_2 & ~io_channel_status_0_2_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_3 = io_req_1_bits_vc_sel_0_3 & ~io_channel_status_0_3_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_8 = io_req_1_bits_vc_sel_0_8 & ~io_channel_status_0_8_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_9 = io_req_1_bits_vc_sel_0_9 & ~io_channel_status_0_9_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_10 = io_req_1_bits_vc_sel_0_10 & ~io_channel_status_0_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_11 = io_req_1_bits_vc_sel_0_11 & ~io_channel_status_0_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_12 = io_req_1_bits_vc_sel_0_12 & ~io_channel_status_0_12_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_13 = io_req_1_bits_vc_sel_0_13 & ~io_channel_status_0_13_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_14 = io_req_1_bits_vc_sel_0_14 & ~io_channel_status_0_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_15 = io_req_1_bits_vc_sel_0_15 & ~io_channel_status_0_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_16 = io_req_1_bits_vc_sel_0_16 & ~io_channel_status_0_16_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_17 = io_req_1_bits_vc_sel_0_17 & ~io_channel_status_0_17_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_18 = io_req_1_bits_vc_sel_0_18 & ~io_channel_status_0_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_19 = io_req_1_bits_vc_sel_0_19 & ~io_channel_status_0_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_20 = io_req_1_bits_vc_sel_0_20 & ~io_channel_status_0_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_21 = io_req_1_bits_vc_sel_0_21 & ~io_channel_status_0_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_2 = io_req_1_bits_vc_sel_1_2 & ~io_channel_status_1_2_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_3 = io_req_1_bits_vc_sel_1_3 & ~io_channel_status_1_3_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_8 = io_req_1_bits_vc_sel_1_8 & ~io_channel_status_1_8_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_9 = io_req_1_bits_vc_sel_1_9 & ~io_channel_status_1_9_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_10 = io_req_1_bits_vc_sel_1_10 & ~io_channel_status_1_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_11 = io_req_1_bits_vc_sel_1_11 & ~io_channel_status_1_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_12 = io_req_1_bits_vc_sel_1_12 & ~io_channel_status_1_12_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_13 = io_req_1_bits_vc_sel_1_13 & ~io_channel_status_1_13_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_14 = io_req_1_bits_vc_sel_1_14 & ~io_channel_status_1_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_15 = io_req_1_bits_vc_sel_1_15 & ~io_channel_status_1_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_16 = io_req_1_bits_vc_sel_1_16 & ~io_channel_status_1_16_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_17 = io_req_1_bits_vc_sel_1_17 & ~io_channel_status_1_17_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_18 = io_req_1_bits_vc_sel_1_18 & ~io_channel_status_1_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_19 = io_req_1_bits_vc_sel_1_19 & ~io_channel_status_1_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_20 = io_req_1_bits_vc_sel_1_20 & ~io_channel_status_1_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_21 = io_req_1_bits_vc_sel_1_21 & ~io_channel_status_1_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_1 = io_req_1_valid & (in_arb_reqs_1_0_2 | in_arb_reqs_1_0_3 | in_arb_reqs_1_0_8 | in_arb_reqs_1_0_9 | in_arb_reqs_1_0_10 | in_arb_reqs_1_0_11 | in_arb_reqs_1_0_12 | in_arb_reqs_1_0_13 | in_arb_reqs_1_0_14 | in_arb_reqs_1_0_15 | in_arb_reqs_1_0_16 | in_arb_reqs_1_0_17 | in_arb_reqs_1_0_18 | in_arb_reqs_1_0_19 | in_arb_reqs_1_0_20 | in_arb_reqs_1_0_21 | in_arb_reqs_1_1_2 | in_arb_reqs_1_1_3 | in_arb_reqs_1_1_8 | in_arb_reqs_1_1_9 | in_arb_reqs_1_1_10 | in_arb_reqs_1_1_11 | in_arb_reqs_1_1_12 | in_arb_reqs_1_1_13 | in_arb_reqs_1_1_14 | in_arb_reqs_1_1_15 | in_arb_reqs_1_1_16 | in_arb_reqs_1_1_17 | in_arb_reqs_1_1_18 | in_arb_reqs_1_1_19 | in_arb_reqs_1_1_20 | in_arb_reqs_1_1_21); // @[package.scala:81:59]
wire _in_vc_sel_T_10 = in_arb_sel[0] & in_arb_reqs_0_0_2 | in_arb_sel[1] & in_arb_reqs_1_0_2; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_13 = in_arb_sel[0] & in_arb_reqs_0_0_3 | in_arb_sel[1] & in_arb_reqs_1_0_3; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_28 = in_arb_sel[0] & in_arb_reqs_0_0_8 | in_arb_sel[1] & in_arb_reqs_1_0_8; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_31 = in_arb_sel[0] & in_arb_reqs_0_0_9 | in_arb_sel[1] & in_arb_reqs_1_0_9; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_34 = in_arb_sel[0] & in_arb_reqs_0_0_10 | in_arb_sel[1] & in_arb_reqs_1_0_10; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_37 = in_arb_sel[0] & in_arb_reqs_0_0_11 | in_arb_sel[1] & in_arb_reqs_1_0_11; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_40 = in_arb_sel[0] & in_arb_reqs_0_0_12 | in_arb_sel[1] & in_arb_reqs_1_0_12; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_43 = in_arb_sel[0] & in_arb_reqs_0_0_13 | in_arb_sel[1] & in_arb_reqs_1_0_13; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_46 = in_arb_sel[0] & in_arb_reqs_0_0_14 | in_arb_sel[1] & in_arb_reqs_1_0_14; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_49 = in_arb_sel[0] & in_arb_reqs_0_0_15 | in_arb_sel[1] & in_arb_reqs_1_0_15; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_52 = in_arb_sel[0] & in_arb_reqs_0_0_16 | in_arb_sel[1] & in_arb_reqs_1_0_16; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_55 = in_arb_sel[0] & in_arb_reqs_0_0_17 | in_arb_sel[1] & in_arb_reqs_1_0_17; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_58 = in_arb_sel[0] & in_arb_reqs_0_0_18 | in_arb_sel[1] & in_arb_reqs_1_0_18; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_61 = in_arb_sel[0] & in_arb_reqs_0_0_19 | in_arb_sel[1] & in_arb_reqs_1_0_19; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_64 = in_arb_sel[0] & in_arb_reqs_0_0_20 | in_arb_sel[1] & in_arb_reqs_1_0_20; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_67 = in_arb_sel[0] & in_arb_reqs_0_0_21 | in_arb_sel[1] & in_arb_reqs_1_0_21; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_75 = in_arb_sel[1] & in_arb_reqs_1_1_2; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_79 = in_arb_sel[0] & in_arb_reqs_0_1_3 | in_arb_sel[1] & in_arb_reqs_1_1_3; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_93 = in_arb_sel[1] & in_arb_reqs_1_1_8; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_97 = in_arb_sel[0] & in_arb_reqs_0_1_9 | in_arb_sel[1] & in_arb_reqs_1_1_9; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_100 = in_arb_sel[0] & in_arb_reqs_0_1_10 | in_arb_sel[1] & in_arb_reqs_1_1_10; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_103 = in_arb_sel[0] & in_arb_reqs_0_1_11 | in_arb_sel[1] & in_arb_reqs_1_1_11; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_106 = in_arb_sel[0] & in_arb_reqs_0_1_12 | in_arb_sel[1] & in_arb_reqs_1_1_12; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_109 = in_arb_sel[0] & in_arb_reqs_0_1_13 | in_arb_sel[1] & in_arb_reqs_1_1_13; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_112 = in_arb_sel[0] & in_arb_reqs_0_1_14 | in_arb_sel[1] & in_arb_reqs_1_1_14; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_115 = in_arb_sel[0] & in_arb_reqs_0_1_15 | in_arb_sel[1] & in_arb_reqs_1_1_15; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_118 = in_arb_sel[0] & in_arb_reqs_0_1_16 | in_arb_sel[1] & in_arb_reqs_1_1_16; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_121 = in_arb_sel[0] & in_arb_reqs_0_1_17 | in_arb_sel[1] & in_arb_reqs_1_1_17; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_124 = in_arb_sel[0] & in_arb_reqs_0_1_18 | in_arb_sel[1] & in_arb_reqs_1_1_18; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_127 = in_arb_sel[0] & in_arb_reqs_0_1_19 | in_arb_sel[1] & in_arb_reqs_1_1_19; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_130 = in_arb_sel[0] & in_arb_reqs_0_1_20 | in_arb_sel[1] & in_arb_reqs_1_1_20; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_133 = in_arb_sel[0] & in_arb_reqs_0_1_21 | in_arb_sel[1] & in_arb_reqs_1_1_21; // @[Mux.scala:30:73, :32:36]
reg [43:0] mask_1; // @[ISLIP.scala:17:25]
wire [43:0] _full_T_1 = {_in_vc_sel_T_133, _in_vc_sel_T_130, _in_vc_sel_T_127, _in_vc_sel_T_124, _in_vc_sel_T_121, _in_vc_sel_T_118, _in_vc_sel_T_115, _in_vc_sel_T_112, _in_vc_sel_T_109, _in_vc_sel_T_106, _in_vc_sel_T_103, _in_vc_sel_T_100, _in_vc_sel_T_97, _in_vc_sel_T_93, 4'h0, _in_vc_sel_T_79, _in_vc_sel_T_75, 2'h0, _in_vc_sel_T_67, _in_vc_sel_T_64, _in_vc_sel_T_61, _in_vc_sel_T_58, _in_vc_sel_T_55, _in_vc_sel_T_52, _in_vc_sel_T_49, _in_vc_sel_T_46, _in_vc_sel_T_43, _in_vc_sel_T_40, _in_vc_sel_T_37, _in_vc_sel_T_34, _in_vc_sel_T_31, _in_vc_sel_T_28, 4'h0, _in_vc_sel_T_13, _in_vc_sel_T_10, 2'h0} & ~mask_1; // @[Mux.scala:30:73]
wire [87:0] oh =
_full_T_1[0]
? 88'h1
: _full_T_1[1]
? 88'h2
: _full_T_1[2]
? 88'h4
: _full_T_1[3]
? 88'h8
: _full_T_1[4]
? 88'h10
: _full_T_1[5]
? 88'h20
: _full_T_1[6]
? 88'h40
: _full_T_1[7]
? 88'h80
: _full_T_1[8]
? 88'h100
: _full_T_1[9]
? 88'h200
: _full_T_1[10]
? 88'h400
: _full_T_1[11]
? 88'h800
: _full_T_1[12]
? 88'h1000
: _full_T_1[13]
? 88'h2000
: _full_T_1[14]
? 88'h4000
: _full_T_1[15]
? 88'h8000
: _full_T_1[16]
? 88'h10000
: _full_T_1[17]
? 88'h20000
: _full_T_1[18]
? 88'h40000
: _full_T_1[19]
? 88'h80000
: _full_T_1[20]
? 88'h100000
: _full_T_1[21]
? 88'h200000
: _full_T_1[22]
? 88'h400000
: _full_T_1[23]
? 88'h800000
: _full_T_1[24]
? 88'h1000000
: _full_T_1[25]
? 88'h2000000
: _full_T_1[26] ? 88'h4000000 : _full_T_1[27] ? 88'h8000000 : _full_T_1[28] ? 88'h10000000 : _full_T_1[29] ? 88'h20000000 : _full_T_1[30] ? 88'h40000000 : _full_T_1[31] ? 88'h80000000 : _full_T_1[32] ? 88'h100000000 : _full_T_1[33] ? 88'h200000000 : _full_T_1[34] ? 88'h400000000 : _full_T_1[35] ? 88'h800000000 : _full_T_1[36] ? 88'h1000000000 : _full_T_1[37] ? 88'h2000000000 : _full_T_1[38] ? 88'h4000000000 : _full_T_1[39] ? 88'h8000000000 : _full_T_1[40] ? 88'h10000000000 : _full_T_1[41] ? 88'h20000000000 : _full_T_1[42] ? 88'h40000000000 : _full_T_1[43] ? 88'h80000000000 : _in_vc_sel_T_10 ? 88'h400000000000 : _in_vc_sel_T_13 ? 88'h800000000000 : _in_vc_sel_T_28 ? 88'h10000000000000 : _in_vc_sel_T_31 ? 88'h20000000000000 : _in_vc_sel_T_34 ? 88'h40000000000000 : _in_vc_sel_T_37 ? 88'h80000000000000 : _in_vc_sel_T_40 ? 88'h100000000000000 : _in_vc_sel_T_43 ? 88'h200000000000000 : _in_vc_sel_T_46 ? 88'h400000000000000 : _in_vc_sel_T_49 ? 88'h800000000000000 : _in_vc_sel_T_52 ? 88'h1000000000000000 : _in_vc_sel_T_55 ? 88'h2000000000000000 : _in_vc_sel_T_58 ? 88'h4000000000000000 : _in_vc_sel_T_61 ? 88'h8000000000000000 : _in_vc_sel_T_64 ? 88'h10000000000000000 : _in_vc_sel_T_67 ? 88'h20000000000000000 : _in_vc_sel_T_75 ? 88'h100000000000000000 : _in_vc_sel_T_79 ? 88'h200000000000000000 : _in_vc_sel_T_93 ? 88'h4000000000000000000 : _in_vc_sel_T_97 ? 88'h8000000000000000000 : _in_vc_sel_T_100 ? 88'h10000000000000000000 : _in_vc_sel_T_103 ? 88'h20000000000000000000 : _in_vc_sel_T_106 ? 88'h40000000000000000000 : _in_vc_sel_T_109 ? 88'h80000000000000000000 : _in_vc_sel_T_112 ? 88'h100000000000000000000 : _in_vc_sel_T_115 ? 88'h200000000000000000000 : _in_vc_sel_T_118 ? 88'h400000000000000000000 : _in_vc_sel_T_121 ? 88'h800000000000000000000 : _in_vc_sel_T_124 ? 88'h1000000000000000000000 : _in_vc_sel_T_127 ? 88'h2000000000000000000000 : _in_vc_sel_T_130 ? 88'h4000000000000000000000 : {_in_vc_sel_T_133, 87'h0}; // @[OneHot.scala:85:71]
wire [43:0] sel = oh[43:0] | oh[87:44]; // @[Mux.scala:50:70]
wire in_alloc_1_2 = _GEN & sel[24]; // @[package.scala:81:59]
wire in_alloc_1_3 = _GEN & sel[25]; // @[package.scala:81:59]
wire in_alloc_1_8 = _GEN & sel[30]; // @[package.scala:81:59]
wire in_alloc_1_9 = _GEN & sel[31]; // @[package.scala:81:59]
wire in_alloc_1_10 = _GEN & sel[32]; // @[package.scala:81:59]
wire in_alloc_1_11 = _GEN & sel[33]; // @[package.scala:81:59]
wire in_alloc_1_12 = _GEN & sel[34]; // @[package.scala:81:59]
wire in_alloc_1_13 = _GEN & sel[35]; // @[package.scala:81:59]
wire in_alloc_1_14 = _GEN & sel[36]; // @[package.scala:81:59]
wire in_alloc_1_15 = _GEN & sel[37]; // @[package.scala:81:59]
wire in_alloc_1_16 = _GEN & sel[38]; // @[package.scala:81:59]
wire in_alloc_1_17 = _GEN & sel[39]; // @[package.scala:81:59]
wire in_alloc_1_18 = _GEN & sel[40]; // @[package.scala:81:59]
wire in_alloc_1_19 = _GEN & sel[41]; // @[package.scala:81:59]
wire in_alloc_1_20 = _GEN & sel[42]; // @[package.scala:81:59]
wire in_alloc_1_21 = _GEN & sel[43]; // @[package.scala:81:59]
wire in_alloc_0_2 = _GEN & sel[2]; // @[package.scala:81:59]
wire in_alloc_0_3 = _GEN & sel[3]; // @[package.scala:81:59]
wire in_alloc_0_8 = _GEN & sel[8]; // @[package.scala:81:59]
wire in_alloc_0_9 = _GEN & sel[9]; // @[package.scala:81:59]
wire in_alloc_0_10 = _GEN & sel[10]; // @[package.scala:81:59]
wire in_alloc_0_11 = _GEN & sel[11]; // @[package.scala:81:59]
wire in_alloc_0_12 = _GEN & sel[12]; // @[package.scala:81:59]
wire in_alloc_0_13 = _GEN & sel[13]; // @[package.scala:81:59]
wire in_alloc_0_14 = _GEN & sel[14]; // @[package.scala:81:59]
wire in_alloc_0_15 = _GEN & sel[15]; // @[package.scala:81:59]
wire in_alloc_0_16 = _GEN & sel[16]; // @[package.scala:81:59]
wire in_alloc_0_17 = _GEN & sel[17]; // @[package.scala:81:59]
wire in_alloc_0_18 = _GEN & sel[18]; // @[package.scala:81:59]
wire in_alloc_0_19 = _GEN & sel[19]; // @[package.scala:81:59]
wire in_alloc_0_20 = _GEN & sel[20]; // @[package.scala:81:59]
wire in_alloc_0_21 = _GEN & sel[21]; // @[package.scala:81:59] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
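    // e.g. (illustrative) Seq(a, b, c, d).rotate(1) yields Seq(b, c, d, a);
    // rotateRight(1) below yields Seq(d, a, b, c).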
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
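    // e.g. (illustrative, n = 5): 3.U.addWrap(4.U, 5) evaluates to 2.U and
    // 1.U.subWrap(3.U, 5) evaluates to 3.U, i.e. plain modular arithmetic.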
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
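  // e.g. (illustrative): the thermometer code "b0111" denotes 3, so UIntToOH1(3.U, 8)
  // yields "b00000111", OH1ToOH of "b0111" yields the one-hot "b1000", and
  // OH1ToUInt of "b0111" recovers 3.U.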
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
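  // e.g. (illustrative, 5-bit input): leftOR("b00100".U(5.W)) = "b11100" and
  // rightOR("b00100".U(5.W)) = "b00111".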
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
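  // e.g. (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
  // Seq(1 -> List(1, 3), 0 -> List(2, 4)), with keys in first-seen order, unlike Seq.groupBy.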
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
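  // e.g. (illustrative): adResponse(Get) is AccessAckData, i.e. a Get on channel A
  // is expected to be answered by AccessAckData on channel D.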
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent holds (or is on an outwards path to) a read-only copy of the data.
 *   (N)one:   the agent holds no permissions on the data.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
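// Illustrative example: an agent at (N)one may issue AcquireBlock with grow param NtoT
// and be answered by a Grant capped at toT; when it later gives the block up, it issues
// Release with shrink param TtoN and receives a ReleaseAck.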
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
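// Example (sketch, assuming `params: TLBundleParameters` is in scope inside a Module):
// when hasBCE is false the b/c/e accessors return dummy wires, so client logic can be
// written uniformly against all five channels.
// {{{
//   val tl = Wire(TLBundle(params))
//   tl := DontCare
//   tl.a.valid := false.B   // idle request channel
//   tl.d.ready := true.B    // always accept responses
//   tl.e.valid := false.B   // legal even when hasBCE == false (drives a dummy wire)
// }}}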
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], return its [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
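// Example (sketch, inside a hypothetical LazyModule): a LazyRawModuleImp is expected to
// drive childClock/childReset itself, typically from an incoming clock/reset bundle.
// {{{
//   lazy val module = new LazyRawModuleImp(this) {
//     override def provideImplicitClockToLazyChildren = true
//     val io = IO(new Bundle { val clk = Input(Clock()); val rst = Input(AsyncReset()) })
//     childClock := io.clk
//     childReset := io.rst
//   }
// }}}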
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
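// Example (sketch): IdRange is half-open, so [0, 4) and [4, 8) do not overlap; the
// hardware `contains(x: UInt)` reduces to a prefix compare plus bounds checks.
// {{{
//   val r = IdRange(4, 8)
//   require(r.contains(5) && !r.contains(8))  // Scala-level membership
//   // val hit: Bool = r.contains(sourceId)   // elaborates a UInt range check (hypothetical sourceId)
// }}}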
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
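// Example (sketch): TransferSizes is an inclusive range of power-of-2 byte sizes;
// intersect narrows two ranges and mincover is the smallest range containing both.
// {{{
//   val a = TransferSizes(4, 64)
//   val b = TransferSizes(16, 256)
//   require(a.intersect(b) == TransferSizes(16, 64))
//   require(a.mincover(b)  == TransferSizes(4, 256))
//   require(a.containsLg(5))                  // 2^5 = 32 bytes lies in [4, 64]
// }}}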
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
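// Example (sketch): mask bits are the address bits the device decodes internally, so
// AddressSet(0x1000, 0xfff) is the 4 KiB region starting at 0x1000.
// {{{
//   val rom = AddressSet(0x1000, 0xfff)
//   require(rom.contains(BigInt(0x1abc)) && !rom.contains(BigInt(0x2000)))
//   require(rom.alignment == 0x1000 && rom.contiguous)
//   // A misaligned region is split into power-of-2 aligned sets:
//   require(AddressSet.misaligned(0x1000, 0x1800) ==
//     Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x7ff)))
// }}}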
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
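// Example (sketch, hypothetical DecoupledIO `in` inside a Module): BufferParams describes
// an optional Queue stage; depth 0 is a pass-through that adds no latency.
// {{{
//   val buffered = BufferParams.default(in) // 2-entry Queue
//   val bypassed = BufferParams.none(in)    // returns `in` unchanged
// }}}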
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
    if (from ne to) { // distinct from/to ranges need a 7-argument format string; if the subclass reuses the same reference for both, assume an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  * It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  * interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  * - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
  /** Sequence of outward ports, prior to any [[EphemeralNode]] forwarding.
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the [[iPortMapping]] of the node on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports, prior to any [[EphemeralNode]] forwarding.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
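  // Example (sketch, hypothetical `edge: TLEdge` and channel `tl.a` inside a Module):
  // firstlastHelper keeps a per-call-site beat counter, so the wrappers below report the
  // beat position within a multibeat burst.
  // {{{
  //   val (first, last, done) = edge.firstlast(tl.a) // per-beat flags
  //   val (_, _, _, beat)     = edge.count(tl.a)     // running beat index
  //   when (tl.a.fire && first) { /* latch per-burst state here */ }
  // }}}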
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
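  // Illustrative usage sketch (not part of the original API): inFlight is meant
  // for monitors and debug assertions, e.g.
  //   val (inflight, inflight_next) = edge.inFlight(tl)
  //   assert(inflight <= (3 * client.endSourceId).U, "too many transactions in flight")
  // Because it instantiates beat trackers on all five channels, only enable it
  // where the extra area is acceptable.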
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
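// Illustrative sketch (not part of the original file): inside a client
// LazyModuleImp, the (legal, bits) pairs built by TLEdgeOut are typically used as
//   val (out, edge) = node.out(0)
//   val (getLegal, getBits) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 2.U)
//   out.a.valid := wantRead && getLegal
//   out.a.bits  := getBits
// where 'node', 'wantRead' and 'addr' are assumed to exist in the surrounding module.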
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
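// Illustrative sketch (not part of the original file): a simple manager builds
// its D-channel beats with the TLEdgeIn helpers above, e.g.
//   val (in, edge) = node.in(0)
//   in.d.bits := Mux(edge.hasData(in.a.bits),
//     edge.AccessAck(in.a.bits),            // Put -> AccessAck (no payload)
//     edge.AccessAck(in.a.bits, readData))  // Get -> AccessAckData
// with the usual ready/valid handshake between a and d; 'node' and 'readData'
// are assumed to exist in the surrounding module.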
File Arbiter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
object TLArbiter
{
  // (width, valids, select) => readys
type Policy = (Integer, UInt, Bool) => UInt
val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0)
val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width))
val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else {
val valid = valids(width-1, 0)
assert (valid === valids)
val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0))
val filter = Cat(valid & ~mask, valid)
val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width)
val readys = ~((unready >> width) & unready(width-1, 0))
when (select && valid.orR) {
mask := leftOR(readys & valid, width)
}
readys(width-1, 0)
}
def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = {
apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = {
if (sources.isEmpty) {
sink.bits := DontCare
} else if (sources.size == 1) {
sink :<>= sources.head._2
} else {
val pairs = sources.toList
val beatsIn = pairs.map(_._1)
val sourcesIn = pairs.map(_._2)
// The number of beats which remain to be sent
val beatsLeft = RegInit(0.U)
val idle = beatsLeft === 0.U
val latch = idle && sink.ready // winner (if any) claims sink
// Who wants access to the sink?
val valids = sourcesIn.map(_.valid)
// Arbitrate amongst the requests
val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools)
// Which request wins arbitration?
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v })
// Confirm the policy works properly
require (readys.size == valids.size)
// Never two winners
val prefixOR = winner.scanLeft(false.B)(_||_).init
assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _})
// If there was any request, there is a winner
assert (!valids.reduce(_||_) || winner.reduce(_||_))
// Track remaining beats
val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) }
val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats
beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire)
// The one-hot source granted access in the previous cycle
val state = RegInit(VecInit(Seq.fill(sources.size)(false.B)))
val muxState = Mux(idle, winner, state)
state := muxState
val allowed = Mux(idle, readys, state)
(sourcesIn zip allowed) foreach { case (s, r) =>
s.ready := sink.ready && r
}
sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids))
sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits))
}
}
}
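// Illustrative usage sketch (not part of the original file): merging two client
// A channels onto one outgoing A channel is typically written as
//   TLArbiter.robin(edgeOut, out.a, clientA.a, clientB.a)
// The arbiter stays locked to the winning source for the full multi-beat burst
// (tracked via numBeats1) before re-arbitrating. 'edgeOut', 'out', 'clientA' and
// 'clientB' are assumed to exist in the surrounding module.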
// Synthesizable unit tests
import freechips.rocketchip.unittest._
abstract class DecoupledArbiterTest(
policy: TLArbiter.Policy,
txns: Int,
timeout: Int,
val numSources: Int,
beatsLeftFromIdx: Int => UInt)
(implicit p: Parameters) extends UnitTest(timeout)
{
val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W))))
dontTouch(sources.suggestName("sources"))
val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W)))
dontTouch(sink.suggestName("sink"))
val count = RegInit(0.U(log2Ceil(txns).W))
val lfsr = LFSR(16, true.B)
sources.zipWithIndex.map { case (z, i) => z.bits := i.U }
TLArbiter(policy)(sink, sources.zipWithIndex.map {
case (z, i) => (beatsLeftFromIdx(i), z)
}:_*)
count := count + 1.U
io.finished := count >= txns.U
}
/** This tests that when a specific pattern of source valids are driven,
* a new index from amongst that pattern is always selected,
* unless one of those sources takes multiple beats,
* in which case the same index should be selected until the arbiter goes idle.
*/
class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false)
(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U)
{
val lastWinner = RegInit((numSources+1).U)
val beatsLeft = RegInit(0.U(log2Ceil(numSources).W))
val first = lastWinner > numSources.U
val valid = lfsr(0)
val ready = lfsr(15)
sink.ready := ready
sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way
case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid)
}
when (sink.fire) {
if (print) { printf("TestRobin: %d\n", sink.bits) }
when (beatsLeft === 0.U) {
assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.")
lastWinner := sink.bits
beatsLeft := sink.bits
} .otherwise {
assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats")
beatsLeft := beatsLeft - 1.U
}
}
if (print) {
when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) }
}
}
/** This tests that the lowest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertLowest(id: Int): Unit = {
when (sources(id).valid) {
assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) }
}
/** This tests that the highest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertHighest(id: Int): Unit = {
when (sources(id).valid) {
assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) }
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
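// Illustrative usage sketch (not part of the original file): requesting forced
// A-channel fanout for everything elaborated inside a block looks roughly like
//   val xbar = ForceFanout(a = TriStateValue(true)) { q => LazyModule(new TLXbar()(q)) }
// which overrides ForceFanoutKey only for that elaboration scope.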
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
    ranges.sortBy(_._2).map(_._1) // Restore original order
}
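  // Worked example (illustrative): assignRanges(Seq(3, 0, 2)) rounds the sizes up
  // to powers of two (4, 0, 2), packs the largest block at the bottom, and returns
  // the ranges in the original port order:
  //   Seq(IdRange(0, 4), IdRange(0, 0), IdRange(4, 6))
  // so every port receives a power-of-two-sized, naturally aligned id block.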
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
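  // Worked example (illustrative): every call to the returned factory yields a
  // fresh per-port mapper, but all mappers share one counter, so equal fifoIds
  // from different ports never collide:
  //   val fresh = relabeler(); val mapA = fresh(); val mapB = fresh()
  //   mapA(0) == 0; mapA(7) == 1; mapA(0) == 0; mapB(0) == 2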
  def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master need connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
// We need an intermediate size of bundle with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
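    // e.g. trim(source, 4) keeps source(1, 0), while trim(source, 1) collapses to
    // 0.U because a single-id port needs no source bits at all.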
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
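  // Note (illustrative): when force(i) is set, the bits pass through an
  // IdentityModule so the physical fanout buffering is kept next to output i
  // (see ForceFanout above); valid ignores the select bit in the single-output
  // case, so a 1:1 crossbar degenerates into plain wires.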
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
| module TLXbar_dmixbar_i1_o2_a9d32s1k1z2u( // @[Xbar.scala:74:9]
input clock, // @[Xbar.scala:74:9]
input reset, // @[Xbar.scala:74:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [8:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_anon_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_1_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [8:0] auto_anon_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_0_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire out_0_d_bits_sink; // @[Xbar.scala:216:19]
wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
wire [8:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire auto_anon_out_1_a_ready_0 = auto_anon_out_1_a_ready; // @[Xbar.scala:74:9]
wire auto_anon_out_1_d_valid_0 = auto_anon_out_1_d_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_1_d_bits_opcode_0 = auto_anon_out_1_d_bits_opcode; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_1_d_bits_data_0 = auto_anon_out_1_d_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_out_0_a_ready_0 = auto_anon_out_0_a_ready; // @[Xbar.scala:74:9]
wire auto_anon_out_0_d_valid_0 = auto_anon_out_0_d_valid; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_0_d_bits_opcode_0 = auto_anon_out_0_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_0_d_bits_param_0 = auto_anon_out_0_d_bits_param; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_0_d_bits_size_0 = auto_anon_out_0_d_bits_size; // @[Xbar.scala:74:9]
wire auto_anon_out_0_d_bits_source_0 = auto_anon_out_0_d_bits_source; // @[Xbar.scala:74:9]
wire auto_anon_out_0_d_bits_sink_0 = auto_anon_out_0_d_bits_sink; // @[Xbar.scala:74:9]
wire auto_anon_out_0_d_bits_denied_0 = auto_anon_out_0_d_bits_denied; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_0_d_bits_data_0 = auto_anon_out_0_d_bits_data; // @[Xbar.scala:74:9]
wire auto_anon_out_0_d_bits_corrupt_0 = auto_anon_out_0_d_bits_corrupt; // @[Xbar.scala:74:9]
wire _readys_T_2 = reset; // @[Arbiter.scala:22:12]
wire [2:0] auto_anon_in_a_bits_param = 3'h0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_1_a_bits_param = 3'h0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_0_a_bits_param = 3'h0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_a_bits_param = 3'h0; // @[MixedNode.scala:551:17]
wire [2:0] anonOut_a_bits_param = 3'h0; // @[MixedNode.scala:542:17]
wire [2:0] x1_anonOut_a_bits_param = 3'h0; // @[MixedNode.scala:542:17]
wire [2:0] in_0_a_bits_param = 3'h0; // @[Xbar.scala:159:18]
wire [2:0] out_0_a_bits_param = 3'h0; // @[Xbar.scala:216:19]
wire [2:0] out_1_a_bits_param = 3'h0; // @[Xbar.scala:216:19]
wire [2:0] _addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] _requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] _requestBOI_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] _requestBOI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] _beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] _beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] _beatsBO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] _beatsBO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] _beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] portsAOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsAOI_filtered_1_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] _portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] _portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] _portsBIO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] _portsBIO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] portsBIO_filtered_1_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] _portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsCOI_filtered_1_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] portsCOI_filtered_1_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [1:0] auto_anon_in_a_bits_size = 2'h2; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_1_a_bits_size = 2'h2; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_1_d_bits_size = 2'h2; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_out_0_a_bits_size = 2'h2; // @[Xbar.scala:74:9]
wire [1:0] anonIn_a_bits_size = 2'h2; // @[MixedNode.scala:551:17]
wire [1:0] anonOut_a_bits_size = 2'h2; // @[MixedNode.scala:542:17]
wire [1:0] x1_anonOut_a_bits_size = 2'h2; // @[MixedNode.scala:542:17]
wire [1:0] x1_anonOut_d_bits_size = 2'h2; // @[MixedNode.scala:542:17]
wire [1:0] in_0_a_bits_size = 2'h2; // @[Xbar.scala:159:18]
wire [1:0] out_0_a_bits_size = 2'h2; // @[Xbar.scala:216:19]
wire [1:0] out_1_a_bits_size = 2'h2; // @[Xbar.scala:216:19]
wire [1:0] out_1_d_bits_size = 2'h2; // @[Xbar.scala:216:19]
wire [1:0] portsAOI_filtered_0_bits_size = 2'h2; // @[Xbar.scala:352:24]
wire [1:0] portsAOI_filtered_1_bits_size = 2'h2; // @[Xbar.scala:352:24]
wire [1:0] portsDIO_filtered_1_0_bits_size = 2'h2; // @[Xbar.scala:352:24]
wire auto_anon_in_a_bits_source = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_in_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_in_d_bits_source = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_a_bits_source = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_d_bits_source = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_d_bits_sink = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_d_bits_denied = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_0_a_bits_source = 1'h0; // @[Xbar.scala:74:9]
wire auto_anon_out_0_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire anonIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire anonOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire x1_anonOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire x1_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire x1_anonOut_d_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire x1_anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire x1_anonOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17]
wire x1_anonOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire in_0_a_bits_source = 1'h0; // @[Xbar.scala:159:18]
wire in_0_a_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire _in_0_a_bits_source_T = 1'h0; // @[Xbar.scala:166:55]
wire out_0_a_bits_source = 1'h0; // @[Xbar.scala:216:19]
wire out_0_a_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire out_1_a_bits_source = 1'h0; // @[Xbar.scala:216:19]
wire out_1_a_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire out_1_d_bits_source = 1'h0; // @[Xbar.scala:216:19]
wire out_1_d_bits_sink = 1'h0; // @[Xbar.scala:216:19]
wire out_1_d_bits_denied = 1'h0; // @[Xbar.scala:216:19]
wire out_1_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire _out_1_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53]
wire _addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _addressC_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _addressC_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _requestBOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:264:61]
wire _requestBOI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire _requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire _requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire _requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire _requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire _requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire _requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire _requestEIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74]
wire _requestEIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74]
wire _requestEIO_WIRE_2_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire _requestEIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61]
wire _requestEIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61]
wire _requestEIO_WIRE_3_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire beatsAI_decode = 1'h0; // @[Edges.scala:220:59]
wire beatsAI_0 = 1'h0; // @[Edges.scala:221:14]
wire _beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire _beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire _beatsBO_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:264:61]
wire _beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire beatsBO_decode = 1'h0; // @[Edges.scala:220:59]
wire _beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37]
wire beatsBO_0 = 1'h0; // @[Edges.scala:221:14]
wire _beatsBO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _beatsBO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61]
wire _beatsBO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61]
wire _beatsBO_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:264:61]
wire _beatsBO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire beatsBO_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire _beatsBO_opdata_T_1 = 1'h0; // @[Edges.scala:97:37]
wire beatsBO_1 = 1'h0; // @[Edges.scala:221:14]
wire _beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _beatsCI_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _beatsCI_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire beatsCI_decode = 1'h0; // @[Edges.scala:220:59]
wire beatsCI_opdata = 1'h0; // @[Edges.scala:102:36]
wire beatsCI_0 = 1'h0; // @[Edges.scala:221:14]
wire beatsDO_decode = 1'h0; // @[Edges.scala:220:59]
wire beatsDO_0 = 1'h0; // @[Edges.scala:221:14]
wire beatsDO_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire beatsDO_1 = 1'h0; // @[Edges.scala:221:14]
wire _beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire portsAOI_filtered_0_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsAOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire portsAOI_filtered_1_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsAOI_filtered_1_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_0_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsBIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire portsBIO_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_1_0_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsBIO_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40]
wire _portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_0_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsCOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsCOI_T = 1'h0; // @[Mux.scala:30:73]
wire _portsCOI_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _portsCOI_T_2 = 1'h0; // @[Mux.scala:30:73]
wire _portsCOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73]
wire portsDIO_filtered_1_0_bits_source = 1'h0; // @[Xbar.scala:352:24]
wire portsDIO_filtered_1_0_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire portsDIO_filtered_1_0_bits_denied = 1'h0; // @[Xbar.scala:352:24]
wire portsDIO_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire _portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire _portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_1_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T = 1'h0; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsEOI_filtered_1_valid_T = 1'h0; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsEOI_T = 1'h0; // @[Mux.scala:30:73]
wire _portsEOI_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _portsEOI_T_2 = 1'h0; // @[Mux.scala:30:73]
wire _portsEOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73]
wire maskedBeats_0 = 1'h0; // @[Arbiter.scala:82:69]
wire maskedBeats_1 = 1'h0; // @[Arbiter.scala:82:69]
wire initBeats = 1'h0; // @[Arbiter.scala:84:44]
wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34]
wire _in_0_d_bits_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _in_0_d_bits_T_7 = 1'h0; // @[Mux.scala:30:73]
wire _in_0_d_bits_T_10 = 1'h0; // @[Mux.scala:30:73]
wire _in_0_d_bits_T_13 = 1'h0; // @[Mux.scala:30:73]
wire [3:0] auto_anon_in_a_bits_mask = 4'hF; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_1_a_bits_mask = 4'hF; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_0_a_bits_mask = 4'hF; // @[Xbar.scala:74:9]
wire [3:0] anonIn_a_bits_mask = 4'hF; // @[MixedNode.scala:551:17]
wire [3:0] anonOut_a_bits_mask = 4'hF; // @[MixedNode.scala:542:17]
wire [3:0] x1_anonOut_a_bits_mask = 4'hF; // @[MixedNode.scala:542:17]
wire [3:0] in_0_a_bits_mask = 4'hF; // @[Xbar.scala:159:18]
wire [3:0] out_0_a_bits_mask = 4'hF; // @[Xbar.scala:216:19]
wire [3:0] out_1_a_bits_mask = 4'hF; // @[Xbar.scala:216:19]
wire [3:0] portsAOI_filtered_0_bits_mask = 4'hF; // @[Xbar.scala:352:24]
wire [3:0] portsAOI_filtered_1_bits_mask = 4'hF; // @[Xbar.scala:352:24]
wire [1:0] auto_anon_out_1_d_bits_param = 2'h0; // @[Xbar.scala:74:9]
wire [1:0] x1_anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire [1:0] out_1_d_bits_param = 2'h0; // @[Xbar.scala:216:19]
wire [1:0] _addressC_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _addressC_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_bits_size = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _requestBOI_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _requestBOI_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _requestBOI_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _beatsAI_decode_T_1 = 2'h0; // @[package.scala:243:76]
wire [1:0] _beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _beatsBO_WIRE_bits_size = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _beatsBO_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _beatsBO_decode_T_2 = 2'h0; // @[package.scala:243:46]
wire [1:0] _beatsBO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _beatsBO_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _beatsBO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _beatsBO_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _beatsBO_decode_T_5 = 2'h0; // @[package.scala:243:46]
wire [1:0] _beatsCI_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _beatsCI_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _beatsCI_decode_T_2 = 2'h0; // @[package.scala:243:46]
wire [1:0] _beatsDO_decode_T_4 = 2'h0; // @[package.scala:243:76]
wire [1:0] _portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _portsBIO_WIRE_bits_size = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _portsBIO_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsBIO_filtered_0_bits_size = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] _portsBIO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _portsBIO_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _portsBIO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _portsBIO_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] portsBIO_filtered_1_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsBIO_filtered_1_0_bits_size = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] _portsCOI_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _portsCOI_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] portsCOI_filtered_0_bits_size = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsCOI_filtered_1_bits_size = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsDIO_filtered_1_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] _in_0_d_bits_T_19 = 2'h0; // @[Mux.scala:30:73]
wire _requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestCIO_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_0_1 = 1'h1; // @[Xbar.scala:308:107]
wire requestBOI_0_0 = 1'h1; // @[Parameters.scala:46:9]
wire requestBOI_1_0 = 1'h1; // @[Parameters.scala:46:9]
wire requestDOI_1_0 = 1'h1; // @[Parameters.scala:46:9]
wire beatsBO_opdata = 1'h1; // @[Edges.scala:97:28]
wire beatsBO_opdata_1 = 1'h1; // @[Edges.scala:97:28]
wire _portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsBIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_1_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsDIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire [31:0] _addressC_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _addressC_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _requestBOI_WIRE_bits_data = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _requestBOI_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _requestBOI_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _requestBOI_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _beatsBO_WIRE_bits_data = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _beatsBO_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _beatsBO_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _beatsBO_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _beatsCI_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _beatsCI_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _portsBIO_WIRE_bits_data = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _portsBIO_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] portsBIO_filtered_0_bits_data = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] _portsBIO_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _portsBIO_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] portsBIO_filtered_1_0_bits_data = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] _portsCOI_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _portsCOI_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] portsCOI_filtered_0_bits_data = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] portsCOI_filtered_1_bits_data = 32'h0; // @[Xbar.scala:352:24]
wire [8:0] _addressC_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _addressC_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _requestCIO_T = 9'h0; // @[Parameters.scala:137:31]
wire [8:0] _requestCIO_T_5 = 9'h0; // @[Parameters.scala:137:31]
wire [8:0] _requestBOI_WIRE_bits_address = 9'h0; // @[Bundles.scala:264:74]
wire [8:0] _requestBOI_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:264:61]
wire [8:0] _requestBOI_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:264:74]
wire [8:0] _requestBOI_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:264:61]
wire [8:0] _beatsBO_WIRE_bits_address = 9'h0; // @[Bundles.scala:264:74]
wire [8:0] _beatsBO_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:264:61]
wire [8:0] _beatsBO_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:264:74]
wire [8:0] _beatsBO_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:264:61]
wire [8:0] _beatsCI_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _beatsCI_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _portsBIO_WIRE_bits_address = 9'h0; // @[Bundles.scala:264:74]
wire [8:0] _portsBIO_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:264:61]
wire [8:0] portsBIO_filtered_0_bits_address = 9'h0; // @[Xbar.scala:352:24]
wire [8:0] _portsBIO_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:264:74]
wire [8:0] _portsBIO_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:264:61]
wire [8:0] portsBIO_filtered_1_0_bits_address = 9'h0; // @[Xbar.scala:352:24]
wire [8:0] _portsCOI_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _portsCOI_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] portsCOI_filtered_0_bits_address = 9'h0; // @[Xbar.scala:352:24]
wire [8:0] portsCOI_filtered_1_bits_address = 9'h0; // @[Xbar.scala:352:24]
wire [3:0] _requestBOI_WIRE_bits_mask = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _requestBOI_WIRE_1_bits_mask = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _requestBOI_WIRE_2_bits_mask = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _requestBOI_WIRE_3_bits_mask = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _beatsBO_WIRE_bits_mask = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _beatsBO_WIRE_1_bits_mask = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _beatsBO_WIRE_2_bits_mask = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _beatsBO_WIRE_3_bits_mask = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _portsBIO_WIRE_bits_mask = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _portsBIO_WIRE_1_bits_mask = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] portsBIO_filtered_0_bits_mask = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsBIO_WIRE_2_bits_mask = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _portsBIO_WIRE_3_bits_mask = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] portsBIO_filtered_1_0_bits_mask = 4'h0; // @[Xbar.scala:352:24]
wire [1:0] _beatsAI_decode_T_2 = 2'h3; // @[package.scala:243:46]
wire [1:0] _beatsBO_decode_T_1 = 2'h3; // @[package.scala:243:76]
wire [1:0] _beatsBO_decode_T_4 = 2'h3; // @[package.scala:243:76]
wire [1:0] _beatsCI_decode_T_1 = 2'h3; // @[package.scala:243:76]
wire [1:0] _beatsDO_decode_T_5 = 2'h3; // @[package.scala:243:46]
wire [4:0] _beatsAI_decode_T = 5'hC; // @[package.scala:243:71]
wire [4:0] _beatsDO_decode_T_3 = 5'hC; // @[package.scala:243:71]
wire [4:0] _beatsBO_decode_T = 5'h3; // @[package.scala:243:71]
wire [4:0] _beatsBO_decode_T_3 = 5'h3; // @[package.scala:243:71]
wire [4:0] _beatsCI_decode_T = 5'h3; // @[package.scala:243:71]
wire [9:0] _requestCIO_T_1 = 10'h0; // @[Parameters.scala:137:41]
wire [9:0] _requestCIO_T_2 = 10'h0; // @[Parameters.scala:137:46]
wire [9:0] _requestCIO_T_3 = 10'h0; // @[Parameters.scala:137:46]
wire [9:0] _requestCIO_T_6 = 10'h0; // @[Parameters.scala:137:41]
wire [9:0] _requestCIO_T_7 = 10'h0; // @[Parameters.scala:137:46]
wire [9:0] _requestCIO_T_8 = 10'h0; // @[Parameters.scala:137:46]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [8:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[Xbar.scala:74:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [31:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire x1_anonOut_a_ready = auto_anon_out_1_a_ready_0; // @[Xbar.scala:74:9]
wire x1_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] x1_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [6:0] x1_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] x1_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire x1_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire x1_anonOut_d_valid = auto_anon_out_1_d_valid_0; // @[Xbar.scala:74:9]
wire [2:0] x1_anonOut_d_bits_opcode = auto_anon_out_1_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [31:0] x1_anonOut_d_bits_data = auto_anon_out_1_d_bits_data_0; // @[Xbar.scala:74:9]
wire anonOut_a_ready = auto_anon_out_0_a_ready_0; // @[Xbar.scala:74:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [8:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_0_d_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_d_bits_opcode = auto_anon_out_0_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] anonOut_d_bits_param = auto_anon_out_0_d_bits_param_0; // @[Xbar.scala:74:9]
wire [1:0] anonOut_d_bits_size = auto_anon_out_0_d_bits_size_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_source = auto_anon_out_0_d_bits_source_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_sink = auto_anon_out_0_d_bits_sink_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_denied = auto_anon_out_0_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [31:0] anonOut_d_bits_data = auto_anon_out_0_d_bits_data_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_corrupt = auto_anon_out_0_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_d_bits_param_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_d_bits_size_0; // @[Xbar.scala:74:9]
wire auto_anon_in_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_in_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_d_valid_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_1_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [6:0] auto_anon_out_1_a_bits_address_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_1_a_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_a_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_out_1_d_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_0_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [8:0] auto_anon_out_0_a_bits_address_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_0_a_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_out_0_a_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_out_0_d_ready_0; // @[Xbar.scala:74:9]
wire in_0_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[Xbar.scala:74:9]
wire in_0_a_valid = anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] in_0_a_bits_opcode = anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [8:0] in_0_a_bits_address = anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] in_0_a_bits_data = anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire in_0_d_ready = anonIn_d_ready; // @[Xbar.scala:159:18]
wire in_0_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_0_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_param_0 = anonIn_d_bits_param; // @[Xbar.scala:74:9]
wire [1:0] in_0_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[Xbar.scala:74:9]
wire in_0_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_sink_0 = anonIn_d_bits_sink; // @[Xbar.scala:74:9]
wire in_0_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_denied_0 = anonIn_d_bits_denied; // @[Xbar.scala:74:9]
wire [31:0] in_0_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[Xbar.scala:74:9]
wire in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[Xbar.scala:74:9]
wire out_0_a_ready = anonOut_a_ready; // @[Xbar.scala:216:19]
wire out_0_a_valid; // @[Xbar.scala:216:19]
assign auto_anon_out_0_a_valid_0 = anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign auto_anon_out_0_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
wire [8:0] out_0_a_bits_address; // @[Xbar.scala:216:19]
assign auto_anon_out_0_a_bits_address_0 = anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [31:0] out_0_a_bits_data; // @[Xbar.scala:216:19]
assign auto_anon_out_0_a_bits_data_0 = anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire out_0_d_ready; // @[Xbar.scala:216:19]
assign auto_anon_out_0_d_ready_0 = anonOut_d_ready; // @[Xbar.scala:74:9]
wire out_0_d_valid = anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] out_0_d_bits_opcode = anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] out_0_d_bits_param = anonOut_d_bits_param; // @[Xbar.scala:216:19]
wire [1:0] out_0_d_bits_size = anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire out_0_d_bits_source = anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire _out_0_d_bits_sink_T = anonOut_d_bits_sink; // @[Xbar.scala:251:53]
wire out_0_d_bits_denied = anonOut_d_bits_denied; // @[Xbar.scala:216:19]
wire [31:0] out_0_d_bits_data = anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire out_0_d_bits_corrupt = anonOut_d_bits_corrupt; // @[Xbar.scala:216:19]
wire out_1_a_ready = x1_anonOut_a_ready; // @[Xbar.scala:216:19]
wire out_1_a_valid; // @[Xbar.scala:216:19]
assign auto_anon_out_1_a_valid_0 = x1_anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] out_1_a_bits_opcode; // @[Xbar.scala:216:19]
assign auto_anon_out_1_a_bits_opcode_0 = x1_anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
assign auto_anon_out_1_a_bits_address_0 = x1_anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [31:0] out_1_a_bits_data; // @[Xbar.scala:216:19]
assign auto_anon_out_1_a_bits_data_0 = x1_anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire out_1_d_ready; // @[Xbar.scala:216:19]
assign auto_anon_out_1_d_ready_0 = x1_anonOut_d_ready; // @[Xbar.scala:74:9]
wire out_1_d_valid = x1_anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] out_1_d_bits_opcode = x1_anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [31:0] out_1_d_bits_data = x1_anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73]
assign anonIn_a_ready = in_0_a_ready; // @[Xbar.scala:159:18]
wire [2:0] portsAOI_filtered_0_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsAOI_filtered_1_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [8:0] _requestAIO_T = in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [8:0] portsAOI_filtered_0_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [8:0] portsAOI_filtered_1_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [31:0] portsAOI_filtered_0_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire [31:0] portsAOI_filtered_1_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire _in_0_d_valid_T_4; // @[Arbiter.scala:96:24]
assign anonIn_d_valid = in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] _in_0_d_bits_WIRE_opcode; // @[Mux.scala:30:73]
assign anonIn_d_bits_opcode = in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] _in_0_d_bits_WIRE_param; // @[Mux.scala:30:73]
assign anonIn_d_bits_param = in_0_d_bits_param; // @[Xbar.scala:159:18]
wire [1:0] _in_0_d_bits_WIRE_size; // @[Mux.scala:30:73]
assign anonIn_d_bits_size = in_0_d_bits_size; // @[Xbar.scala:159:18]
wire _in_0_d_bits_WIRE_source; // @[Mux.scala:30:73]
wire _in_0_d_bits_WIRE_sink; // @[Mux.scala:30:73]
assign anonIn_d_bits_sink = in_0_d_bits_sink; // @[Xbar.scala:159:18]
wire _in_0_d_bits_WIRE_denied; // @[Mux.scala:30:73]
assign anonIn_d_bits_denied = in_0_d_bits_denied; // @[Xbar.scala:159:18]
wire [31:0] _in_0_d_bits_WIRE_data; // @[Mux.scala:30:73]
assign anonIn_d_bits_data = in_0_d_bits_data; // @[Xbar.scala:159:18]
wire _in_0_d_bits_WIRE_corrupt; // @[Mux.scala:30:73]
assign anonIn_d_bits_corrupt = in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
wire in_0_d_bits_source; // @[Xbar.scala:159:18]
wire portsAOI_filtered_0_ready = out_0_a_ready; // @[Xbar.scala:216:19, :352:24]
wire portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign anonOut_a_valid = out_0_a_valid; // @[Xbar.scala:216:19]
assign anonOut_a_bits_opcode = out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign anonOut_a_bits_address = out_0_a_bits_address; // @[Xbar.scala:216:19]
assign anonOut_a_bits_data = out_0_a_bits_data; // @[Xbar.scala:216:19]
wire portsDIO_filtered_0_ready; // @[Xbar.scala:352:24]
assign anonOut_d_ready = out_0_d_ready; // @[Xbar.scala:216:19]
wire _portsDIO_filtered_0_valid_T_1 = out_0_d_valid; // @[Xbar.scala:216:19, :355:40]
wire [2:0] portsDIO_filtered_0_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
wire [1:0] portsDIO_filtered_0_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24]
wire [1:0] portsDIO_filtered_0_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
wire portsDIO_filtered_0_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
wire portsDIO_filtered_0_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24]
wire portsDIO_filtered_0_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24]
wire [31:0] portsDIO_filtered_0_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
wire portsDIO_filtered_0_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
wire portsAOI_filtered_1_ready = out_1_a_ready; // @[Xbar.scala:216:19, :352:24]
wire portsAOI_filtered_1_valid; // @[Xbar.scala:352:24]
assign x1_anonOut_a_valid = out_1_a_valid; // @[Xbar.scala:216:19]
assign x1_anonOut_a_bits_opcode = out_1_a_bits_opcode; // @[Xbar.scala:216:19]
assign x1_anonOut_a_bits_data = out_1_a_bits_data; // @[Xbar.scala:216:19]
wire portsDIO_filtered_1_0_ready; // @[Xbar.scala:352:24]
assign x1_anonOut_d_ready = out_1_d_ready; // @[Xbar.scala:216:19]
wire _portsDIO_filtered_0_valid_T_3 = out_1_d_valid; // @[Xbar.scala:216:19, :355:40]
wire [2:0] portsDIO_filtered_1_0_bits_opcode = out_1_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
wire [31:0] portsDIO_filtered_1_0_bits_data = out_1_d_bits_data; // @[Xbar.scala:216:19, :352:24]
wire [8:0] out_1_a_bits_address; // @[Xbar.scala:216:19]
assign out_0_d_bits_sink = _out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53]
assign x1_anonOut_a_bits_address = out_1_a_bits_address[6:0]; // @[Xbar.scala:216:19, :222:41]
wire [9:0] _requestAIO_T_1 = {1'h0, _requestAIO_T}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_2 = _requestAIO_T_1 & 10'h1C0; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_3 = _requestAIO_T_2; // @[Parameters.scala:137:46]
wire _requestAIO_T_4 = _requestAIO_T_3 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire [8:0] _requestAIO_T_5 = {in_0_a_bits_address[8:7], in_0_a_bits_address[6:0] ^ 7'h44}; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_6 = {1'h0, _requestAIO_T_5}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_7 = _requestAIO_T_6 & 10'h1F4; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_8 = _requestAIO_T_7; // @[Parameters.scala:137:46]
wire _requestAIO_T_9 = _requestAIO_T_8 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire [8:0] _requestAIO_T_10 = {in_0_a_bits_address[8:7], in_0_a_bits_address[6:0] ^ 7'h58}; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_11 = {1'h0, _requestAIO_T_10}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_12 = _requestAIO_T_11 & 10'h1F8; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_13 = _requestAIO_T_12; // @[Parameters.scala:137:46]
wire _requestAIO_T_14 = _requestAIO_T_13 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire [8:0] _requestAIO_T_15 = {in_0_a_bits_address[8:7], in_0_a_bits_address[6:0] ^ 7'h60}; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_16 = {1'h0, _requestAIO_T_15}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_17 = _requestAIO_T_16 & 10'h1E0; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_18 = _requestAIO_T_17; // @[Parameters.scala:137:46]
wire _requestAIO_T_19 = _requestAIO_T_18 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire [8:0] _requestAIO_T_20 = {in_0_a_bits_address[8], in_0_a_bits_address[7:0] ^ 8'h80}; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_21 = {1'h0, _requestAIO_T_20}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_22 = _requestAIO_T_21 & 10'h180; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_23 = _requestAIO_T_22; // @[Parameters.scala:137:46]
wire _requestAIO_T_24 = _requestAIO_T_23 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire [8:0] _requestAIO_T_25 = in_0_a_bits_address ^ 9'h100; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_26 = {1'h0, _requestAIO_T_25}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_27 = _requestAIO_T_26 & 10'h100; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_28 = _requestAIO_T_27; // @[Parameters.scala:137:46]
wire _requestAIO_T_29 = _requestAIO_T_28 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire _requestAIO_T_30 = _requestAIO_T_4 | _requestAIO_T_9; // @[Xbar.scala:291:92]
wire _requestAIO_T_31 = _requestAIO_T_30 | _requestAIO_T_14; // @[Xbar.scala:291:92]
wire _requestAIO_T_32 = _requestAIO_T_31 | _requestAIO_T_19; // @[Xbar.scala:291:92]
wire _requestAIO_T_33 = _requestAIO_T_32 | _requestAIO_T_24; // @[Xbar.scala:291:92]
wire _requestAIO_T_34 = _requestAIO_T_33 | _requestAIO_T_29; // @[Xbar.scala:291:92]
wire requestAIO_0_0 = _requestAIO_T_34; // @[Xbar.scala:291:92, :307:107]
wire _portsAOI_filtered_0_valid_T = requestAIO_0_0; // @[Xbar.scala:307:107, :355:54]
wire [8:0] _requestAIO_T_35 = {in_0_a_bits_address[8:7], in_0_a_bits_address[6:0] ^ 7'h40}; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_36 = {1'h0, _requestAIO_T_35}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_37 = _requestAIO_T_36 & 10'h1F4; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_38 = _requestAIO_T_37; // @[Parameters.scala:137:46]
wire _requestAIO_T_39 = _requestAIO_T_38 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire [8:0] _requestAIO_T_40 = {in_0_a_bits_address[8:7], in_0_a_bits_address[6:0] ^ 7'h50}; // @[Xbar.scala:159:18]
wire [9:0] _requestAIO_T_41 = {1'h0, _requestAIO_T_40}; // @[Parameters.scala:137:{31,41}]
wire [9:0] _requestAIO_T_42 = _requestAIO_T_41 & 10'h1F8; // @[Parameters.scala:137:{41,46}]
wire [9:0] _requestAIO_T_43 = _requestAIO_T_42; // @[Parameters.scala:137:46]
wire _requestAIO_T_44 = _requestAIO_T_43 == 10'h0; // @[Parameters.scala:137:{46,59}]
wire _requestAIO_T_45 = _requestAIO_T_39 | _requestAIO_T_44; // @[Xbar.scala:291:92]
wire requestAIO_0_1 = _requestAIO_T_45; // @[Xbar.scala:291:92, :307:107]
wire _portsAOI_filtered_1_valid_T = requestAIO_0_1; // @[Xbar.scala:307:107, :355:54]
wire requestDOI_0_0 = ~out_0_d_bits_source; // @[Xbar.scala:216:19]
wire _beatsAI_opdata_T = in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire beatsAI_opdata = ~_beatsAI_opdata_T; // @[Edges.scala:92:{28,37}]
wire [4:0] _beatsDO_decode_T = 5'h3 << out_0_d_bits_size; // @[package.scala:243:71]
wire [1:0] _beatsDO_decode_T_1 = _beatsDO_decode_T[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _beatsDO_decode_T_2 = ~_beatsDO_decode_T_1; // @[package.scala:243:{46,76}]
wire beatsDO_opdata = out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire beatsDO_opdata_1 = out_1_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:355:40]
assign out_0_a_valid = portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign out_0_a_bits_opcode = portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign out_0_a_bits_address = portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign out_0_a_bits_data = portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
wire _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:355:40]
assign out_1_a_valid = portsAOI_filtered_1_valid; // @[Xbar.scala:216:19, :352:24]
assign out_1_a_bits_opcode = portsAOI_filtered_1_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign out_1_a_bits_address = portsAOI_filtered_1_bits_address; // @[Xbar.scala:216:19, :352:24]
assign out_1_a_bits_data = portsAOI_filtered_1_bits_data; // @[Xbar.scala:216:19, :352:24]
assign _portsAOI_filtered_0_valid_T_1 = in_0_a_valid & _portsAOI_filtered_0_valid_T; // @[Xbar.scala:159:18, :355:{40,54}]
assign portsAOI_filtered_0_valid = _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign _portsAOI_filtered_1_valid_T_1 = in_0_a_valid & _portsAOI_filtered_1_valid_T; // @[Xbar.scala:159:18, :355:{40,54}]
assign portsAOI_filtered_1_valid = _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40]
wire _portsAOI_in_0_a_ready_T = requestAIO_0_0 & portsAOI_filtered_0_ready; // @[Mux.scala:30:73]
wire _portsAOI_in_0_a_ready_T_1 = requestAIO_0_1 & portsAOI_filtered_1_ready; // @[Mux.scala:30:73]
wire _portsAOI_in_0_a_ready_T_2 = _portsAOI_in_0_a_ready_T | _portsAOI_in_0_a_ready_T_1; // @[Mux.scala:30:73]
assign _portsAOI_in_0_a_ready_WIRE = _portsAOI_in_0_a_ready_T_2; // @[Mux.scala:30:73]
assign in_0_a_ready = _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73]
wire _filtered_0_ready_T; // @[Arbiter.scala:94:31]
assign out_0_d_ready = portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
wire portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign portsDIO_filtered_0_valid = _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
wire _filtered_0_ready_T_1; // @[Arbiter.scala:94:31]
assign out_1_d_ready = portsDIO_filtered_1_0_ready; // @[Xbar.scala:216:19, :352:24]
wire portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24]
assign portsDIO_filtered_1_0_valid = _portsDIO_filtered_0_valid_T_3; // @[Xbar.scala:352:24, :355:40]
reg beatsLeft; // @[Arbiter.scala:60:30]
wire idle = ~beatsLeft; // @[Arbiter.scala:60:30, :61:28]
wire latch = idle & in_0_d_ready; // @[Xbar.scala:159:18]
wire [1:0] _readys_T = {portsDIO_filtered_1_0_valid, portsDIO_filtered_0_valid}; // @[Xbar.scala:352:24]
wire [1:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51]
wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51]
wire _readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12]
wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}]
reg [1:0] readys_mask; // @[Arbiter.scala:23:23]
wire [1:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30]
wire [1:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}]
wire [3:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}]
wire [2:0] _readys_unready_T = readys_filter[3:1]; // @[package.scala:262:48]
wire [3:0] _readys_unready_T_1 = {readys_filter[3], readys_filter[2:0] | _readys_unready_T}; // @[package.scala:262:{43,48}]
wire [3:0] _readys_unready_T_2 = _readys_unready_T_1; // @[package.scala:262:43, :263:17]
wire [2:0] _readys_unready_T_3 = _readys_unready_T_2[3:1]; // @[package.scala:263:17]
wire [3:0] _readys_unready_T_4 = {readys_mask, 2'h0}; // @[Arbiter.scala:23:23, :25:66]
wire [3:0] readys_unready = {1'h0, _readys_unready_T_3} | _readys_unready_T_4; // @[Arbiter.scala:25:{52,58,66}]
wire [1:0] _readys_readys_T = readys_unready[3:2]; // @[Arbiter.scala:25:58, :26:29]
wire [1:0] _readys_readys_T_1 = readys_unready[1:0]; // @[Arbiter.scala:25:58, :26:48]
wire [1:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}]
wire [1:0] readys_readys = ~_readys_readys_T_2; // @[Arbiter.scala:26:{18,39}]
wire [1:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11]
wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27]
wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, :62:24]
wire [1:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29]
wire [2:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48]
wire [1:0] _readys_mask_T_2 = _readys_mask_T_1[1:0]; // @[package.scala:253:{48,53}]
wire [1:0] _readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}]
wire [1:0] _readys_mask_T_4 = _readys_mask_T_3; // @[package.scala:253:43, :254:17]
wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76]
wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}]
wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76]
wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}]
wire _winner_T = readys_0 & portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}]
wire _winner_T_1 = readys_1 & portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24]
wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}]
wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48]
wire _prefixOR_T = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48]
wire _in_0_d_valid_T = portsDIO_filtered_0_valid | portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24]
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
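A minimal usage sketch (an assumed illustration added by the editor; it is not one of the prompt files and is not reflected in the generated Verilog below): TLBuffer stages are typically spliced into the diplomatic graph with the apply/chainNode helpers, here between a hypothetical client node and a small TLRAM. The class, node, and name choices are assumptions for illustration only.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams}
import freechips.rocketchip.tilelink._

// Hypothetical LazyModule: one default buffer stage plus a two-deep buffer chain
// between a single TileLink master and a RAM. All names here are illustrative.
class BufferedRamExample(implicit p: Parameters) extends LazyModule {
  val client = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-master")))))
  val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = 8))

  // ram.node <- default buffer <- two chained default buffers <- client.node
  ram.node := TLBuffer(BufferParams.default) := TLBuffer.chainNode(2) := client.node

  lazy val module = new LazyModuleImp(this)
}

BufferParams.flow or BufferParams.pipe can be passed instead of BufferParams.default to trade latency against timing; the five-argument constructor above exposes that choice per channel (a/b/c/d/e).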
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
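To illustrate the adapter node type above (an assumed sketch added by the editor, not part of the prompt files): a pass-through adapter keeps the identity clientFn/managerFn defaults and simply forwards the channels.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.tilelink._

// Hypothetical identity adapter: parameters pass through unchanged and every
// channel is wired straight across. Unlike TLBuffer, this sketch forwards the
// B/C/E channels unconditionally instead of checking for Acquire support.
class TLPassthrough(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out) foreach { case ((in, _), (out, _)) =>
      out.a <> in.a
      in.d <> out.d
      in.b <> out.b
      out.c <> in.c
      out.e <> in.e
    }
  }
}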
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
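// A minimal sketch (hypothetical, not part of the original file) of driving the childClock and
// childReset wires declared above from explicit IO, and letting lazy children pick them up
// implicitly via provideImplicitClockToLazyChildren. Names are illustrative only.
class ExplicitClockRawImp(w: LazyModule) extends LazyRawModuleImp(w) {
  override def provideImplicitClockToLazyChildren: Boolean = true
  val clockIn = IO(Input(Clock()))
  val resetIn = IO(Input(Bool()))
  childClock := clockIn
  childReset := resetIn
}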
| module TLBuffer_a14d64s8k1z4u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [13:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [13:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [1:0] _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [3:0] _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [7:0] _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21]
TLMonitor_33 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_nodeOut_a_q_io_enq_ready), // @[Decoupled.scala:362:21]
.io_in_a_valid (auto_in_a_valid),
.io_in_a_bits_opcode (auto_in_a_bits_opcode),
.io_in_a_bits_param (auto_in_a_bits_param),
.io_in_a_bits_size (auto_in_a_bits_size),
.io_in_a_bits_source (auto_in_a_bits_source),
.io_in_a_bits_address (auto_in_a_bits_address),
.io_in_a_bits_mask (auto_in_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_a_bits_corrupt),
.io_in_d_ready (auto_in_d_ready),
.io_in_d_valid (_nodeIn_d_q_io_deq_valid), // @[Decoupled.scala:362:21]
.io_in_d_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), // @[Decoupled.scala:362:21]
.io_in_d_bits_param (_nodeIn_d_q_io_deq_bits_param), // @[Decoupled.scala:362:21]
.io_in_d_bits_size (_nodeIn_d_q_io_deq_bits_size), // @[Decoupled.scala:362:21]
.io_in_d_bits_source (_nodeIn_d_q_io_deq_bits_source), // @[Decoupled.scala:362:21]
.io_in_d_bits_sink (_nodeIn_d_q_io_deq_bits_sink), // @[Decoupled.scala:362:21]
.io_in_d_bits_denied (_nodeIn_d_q_io_deq_bits_denied), // @[Decoupled.scala:362:21]
.io_in_d_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt) // @[Decoupled.scala:362:21]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a14d64s8k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (_nodeOut_a_q_io_enq_ready),
.io_enq_valid (auto_in_a_valid),
.io_enq_bits_opcode (auto_in_a_bits_opcode),
.io_enq_bits_param (auto_in_a_bits_param),
.io_enq_bits_size (auto_in_a_bits_size),
.io_enq_bits_source (auto_in_a_bits_source),
.io_enq_bits_address (auto_in_a_bits_address),
.io_enq_bits_mask (auto_in_a_bits_mask),
.io_enq_bits_data (auto_in_a_bits_data),
.io_enq_bits_corrupt (auto_in_a_bits_corrupt),
.io_deq_ready (auto_out_a_ready),
.io_deq_valid (auto_out_a_valid),
.io_deq_bits_opcode (auto_out_a_bits_opcode),
.io_deq_bits_param (auto_out_a_bits_param),
.io_deq_bits_size (auto_out_a_bits_size),
.io_deq_bits_source (auto_out_a_bits_source),
.io_deq_bits_address (auto_out_a_bits_address),
.io_deq_bits_mask (auto_out_a_bits_mask),
.io_deq_bits_data (auto_out_a_bits_data),
.io_deq_bits_corrupt (auto_out_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a14d64s8k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (auto_out_d_ready),
.io_enq_valid (auto_out_d_valid),
.io_enq_bits_opcode (auto_out_d_bits_opcode),
.io_enq_bits_size (auto_out_d_bits_size),
.io_enq_bits_source (auto_out_d_bits_source),
.io_enq_bits_corrupt (auto_out_d_bits_corrupt),
.io_deq_ready (auto_in_d_ready),
.io_deq_valid (_nodeIn_d_q_io_deq_valid),
.io_deq_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode),
.io_deq_bits_param (_nodeIn_d_q_io_deq_bits_param),
.io_deq_bits_size (_nodeIn_d_q_io_deq_bits_size),
.io_deq_bits_source (_nodeIn_d_q_io_deq_bits_source),
.io_deq_bits_sink (_nodeIn_d_q_io_deq_bits_sink),
.io_deq_bits_denied (_nodeIn_d_q_io_deq_bits_denied),
.io_deq_bits_data (auto_in_d_bits_data),
.io_deq_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21]
assign auto_in_d_valid = _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_opcode = _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_param = _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_size = _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_source = _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_sink = _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_denied = _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
assign auto_in_d_bits_corrupt = _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File regfile.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Register File (Abstract class and Synthesizable RegFile)
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.exu
import scala.collection.mutable.ArrayBuffer
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v4.common._
import boom.v4.util._
abstract class RegisterFile[T <: Data](
dType: T,
numRegisters: Int,
numReadPorts: Int,
numWritePorts: Int)
(implicit p: Parameters) extends BoomModule
{
val io = IO(new BoomBundle {
val arb_read_reqs = Vec(numReadPorts, Flipped(Decoupled(UInt(log2Ceil(numRegisters).W))))
val rrd_read_resps = Vec(numReadPorts, Output(dType))
val write_ports = Vec(numWritePorts, Flipped(Valid(new Bundle {
val addr = Output(UInt(maxPregSz.W))
val data = Output(dType)
})))
})
// ensure there is only 1 writer per register (unless to preg0)
if (numWritePorts > 1) {
for (i <- 0 until (numWritePorts - 1)) {
for (j <- (i + 1) until numWritePorts) {
assert(!io.write_ports(i).valid ||
!io.write_ports(j).valid ||
(io.write_ports(i).bits.addr =/= io.write_ports(j).bits.addr),
"[regfile] too many writers a register")
}
}
}
}
class BankedRF[T <: Data](
dType: T,
numBanks: Int,
numLogicalReadPortsPerBank: Int,
numRegisters: Int,
numLogicalReadPorts: Int,
numPhysicalReadPorts: Int,
numWritePorts: Int,
bankedWritePortArray: Seq[Option[Int]],
typeStr: String
)(implicit p: Parameters)
extends RegisterFile(dType, numRegisters, numLogicalReadPorts, numWritePorts)
{
require(isPow2(numBanks))
require(numRegisters % numBanks == 0)
require(bankedWritePortArray.length == numWritePorts)
val numDedicatedWritePorts = bankedWritePortArray.flatten.length
val writePortsPerBank = if (numDedicatedWritePorts == 0) {
numWritePorts
} else {
numWritePorts - numDedicatedWritePorts + 1
}
def bankIdx(i: UInt): UInt = i(log2Ceil(numBanks)-1,0)
val rfs = (0 until numBanks) map { w => Module(new PartiallyPortedRF(
dType,
numRegisters / numBanks,
numLogicalReadPortsPerBank,
numPhysicalReadPorts,
writePortsPerBank,
typeStr + s" Bank ${w}"
)) }
if (numBanks == 1) {
require(numLogicalReadPortsPerBank == numLogicalReadPorts)
io <> rfs(0).io
} else {
val widxs = Array.fill(numBanks)(0)
for (i <- 0 until numWritePorts) {
if (bankedWritePortArray(i) != None) {
val bank = bankedWritePortArray(i).get
val widx = widxs(bank)
rfs(bank).io.write_ports(widx).valid := io.write_ports(i).valid
rfs(bank).io.write_ports(widx).bits.addr := io.write_ports(i).bits.addr >> log2Ceil(numBanks)
rfs(bank).io.write_ports(widx).bits.data := io.write_ports(i).bits.data
assert(!io.write_ports(i).valid || bankIdx(io.write_ports(i).bits.addr) === bank.U)
widxs(bank) = widx + 1
} else {
for (w <- 0 until numBanks) {
val widx = widxs(w)
rfs(w).io.write_ports(widx).valid := io.write_ports(i).valid && bankIdx(io.write_ports(i).bits.addr) === w.U
rfs(w).io.write_ports(widx).bits.addr := io.write_ports(i).bits.addr >> log2Ceil(numBanks)
rfs(w).io.write_ports(widx).bits.data := io.write_ports(i).bits.data
widxs(w) = widx + 1
}
}
}
require(widxs.forall(_ == writePortsPerBank), widxs.mkString(","))
if (numLogicalReadPortsPerBank == numLogicalReadPorts) {
for (i <- 0 until numLogicalReadPorts) {
val bidx = bankIdx(io.arb_read_reqs(i).bits)
for (w <- 0 until numBanks) {
rfs(w).io.arb_read_reqs(i).valid := io.arb_read_reqs(i).valid && bankIdx(io.arb_read_reqs(i).bits) === w.U
rfs(w).io.arb_read_reqs(i).bits := io.arb_read_reqs(i).bits >> log2Ceil(numBanks)
}
val arb_data_sel = UIntToOH(bidx)
val rrd_data_sel = RegNext(arb_data_sel)
io.arb_read_reqs(i).ready := Mux1H(arb_data_sel, rfs.map(_.io.arb_read_reqs(i).ready))
io.rrd_read_resps(i) := Mux1H(rrd_data_sel, rfs.map(_.io.rrd_read_resps(i)))
}
}
}
override def toString: String = rfs.map(_.toString).mkString
}
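// A minimal pure-Scala sketch (hypothetical, not part of the original file) of the banking scheme
// used above: the low log2(numBanks) bits of a physical register index select the bank, and the
// remaining high bits (addr >> log2(numBanks)) index within that bank.
object BankStripingSketch {
  def split(preg: Int, numBanks: Int): (Int, Int) =
    (preg & (numBanks - 1), preg >> Integer.numberOfTrailingZeros(numBanks))
  def main(args: Array[String]): Unit =
    (0 until 8).foreach { preg => println(s"preg $preg -> bank/index ${split(preg, numBanks = 4)}") }
}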
class PartiallyPortedRF[T <: Data](
dType: T,
numRegisters: Int,
numLogicalReadPorts: Int,
numPhysicalReadPorts: Int,
numWritePorts: Int,
typeStr: String
)(implicit p: Parameters)
extends RegisterFile(dType, numRegisters, numLogicalReadPorts, numWritePorts)
{
val rf = Module(new FullyPortedRF(
dType = dType,
numRegisters = numRegisters,
numReadPorts = numPhysicalReadPorts,
numWritePorts = numWritePorts,
typeStr = "Partially Ported " + typeStr,
))
rf.io.write_ports := io.write_ports
val port_issued = Array.fill(numPhysicalReadPorts) { false.B }
val port_addrs = Array.fill(numPhysicalReadPorts) { 0.U(log2Ceil(numRegisters).W) }
val data_sels = Wire(Vec(numLogicalReadPorts , UInt(numPhysicalReadPorts.W)))
data_sels := DontCare
for (i <- 0 until numLogicalReadPorts) {
var read_issued = false.B
for (j <- 0 until numPhysicalReadPorts) {
val issue_read = WireInit(false.B)
val use_port = WireInit(false.B)
when (!read_issued && !port_issued(j) && io.arb_read_reqs(i).valid) {
issue_read := true.B
use_port := true.B
data_sels(i) := UIntToOH(j.U)
}
val was_port_issued_yet = port_issued(j)
port_issued(j) = use_port || port_issued(j)
port_addrs(j) = port_addrs(j) | Mux(was_port_issued_yet || !use_port, 0.U, io.arb_read_reqs(i).bits)
read_issued = issue_read || read_issued
}
io.arb_read_reqs(i).ready := PopCount(io.arb_read_reqs.take(i).map(_.valid)) < numPhysicalReadPorts.U
assert(!(io.arb_read_reqs(i).fire && !read_issued))
}
for (j <- 0 until numPhysicalReadPorts) {
rf.io.arb_read_reqs(j).valid := port_issued(j)
rf.io.arb_read_reqs(j).bits := port_addrs(j)
assert(rf.io.arb_read_reqs(j).ready)
}
val rrd_data_sels = RegNext(data_sels)
for (i <- 0 until numLogicalReadPorts) {
io.rrd_read_resps(i) := Mux1H(rrd_data_sels(i).asBools, rf.io.rrd_read_resps)
}
override def toString: String = rf.toString
}
class FullyPortedRF[T <: Data](
dType: T,
numRegisters: Int,
numReadPorts: Int,
numWritePorts: Int,
typeStr: String,
)(implicit p: Parameters)
extends RegisterFile(dType, numRegisters, numReadPorts, numWritePorts)
{
val rf_cost = (numReadPorts + numWritePorts) * (numReadPorts + 2*numWritePorts)
override def toString: String = BoomCoreStringPrefix(
"==" + typeStr + " Regfile==",
"Num RF Read Ports : " + numReadPorts,
"Num RF Write Ports : " + numWritePorts,
"RF Cost (R+W)*(R+2W) : " + rf_cost)
io.arb_read_reqs.map(p => p.ready := true.B)
val regfile = Mem(numRegisters, dType)
(0 until numReadPorts) map {p => io.rrd_read_resps(p) := regfile(RegNext(io.arb_read_reqs(p).bits)) }
io.write_ports map { p => when (p.valid) { regfile(p.bits.addr) := p.bits.data }}
}
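// A minimal pure-Scala sketch (hypothetical, not part of the original file) evaluating the
// port-cost heuristic printed above, (R + W) * (R + 2W), for a hypothetical 3-read / 2-write bank.
object RegfileCostSketch {
  def rfCost(r: Int, w: Int): Int = (r + w) * (r + 2 * w)
  def main(args: Array[String]): Unit =
    println(s"cost(3 reads, 2 writes) = ${rfCost(3, 2)}") // (3 + 2) * (3 + 4) = 35
}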
| module PartiallyPortedRF( // @[regfile.scala:128:7]
input clock, // @[regfile.scala:128:7]
input reset, // @[regfile.scala:128:7]
input io_arb_read_reqs_0_valid, // @[regfile.scala:31:14]
input [5:0] io_arb_read_reqs_0_bits, // @[regfile.scala:31:14]
input io_arb_read_reqs_1_valid, // @[regfile.scala:31:14]
input [5:0] io_arb_read_reqs_1_bits, // @[regfile.scala:31:14]
output io_arb_read_reqs_2_ready, // @[regfile.scala:31:14]
input io_arb_read_reqs_2_valid, // @[regfile.scala:31:14]
input [5:0] io_arb_read_reqs_2_bits, // @[regfile.scala:31:14]
output [64:0] io_rrd_read_resps_0, // @[regfile.scala:31:14]
output [64:0] io_rrd_read_resps_1, // @[regfile.scala:31:14]
output [64:0] io_rrd_read_resps_2, // @[regfile.scala:31:14]
input io_write_ports_0_valid, // @[regfile.scala:31:14]
input [6:0] io_write_ports_0_bits_addr, // @[regfile.scala:31:14]
input [64:0] io_write_ports_0_bits_data, // @[regfile.scala:31:14]
input io_write_ports_1_valid, // @[regfile.scala:31:14]
input [6:0] io_write_ports_1_bits_addr, // @[regfile.scala:31:14]
input [64:0] io_write_ports_1_bits_data // @[regfile.scala:31:14]
);
wire [64:0] _rf_io_rrd_read_resps_0; // @[regfile.scala:138:18]
wire [64:0] _rf_io_rrd_read_resps_1; // @[regfile.scala:138:18]
wire [64:0] _rf_io_rrd_read_resps_2; // @[regfile.scala:138:18]
wire io_arb_read_reqs_0_valid_0 = io_arb_read_reqs_0_valid; // @[regfile.scala:128:7]
wire [5:0] io_arb_read_reqs_0_bits_0 = io_arb_read_reqs_0_bits; // @[regfile.scala:128:7]
wire io_arb_read_reqs_1_valid_0 = io_arb_read_reqs_1_valid; // @[regfile.scala:128:7]
wire [5:0] io_arb_read_reqs_1_bits_0 = io_arb_read_reqs_1_bits; // @[regfile.scala:128:7]
wire io_arb_read_reqs_2_valid_0 = io_arb_read_reqs_2_valid; // @[regfile.scala:128:7]
wire [5:0] io_arb_read_reqs_2_bits_0 = io_arb_read_reqs_2_bits; // @[regfile.scala:128:7]
wire io_write_ports_0_valid_0 = io_write_ports_0_valid; // @[regfile.scala:128:7]
wire [6:0] io_write_ports_0_bits_addr_0 = io_write_ports_0_bits_addr; // @[regfile.scala:128:7]
wire [64:0] io_write_ports_0_bits_data_0 = io_write_ports_0_bits_data; // @[regfile.scala:128:7]
wire io_write_ports_1_valid_0 = io_write_ports_1_valid; // @[regfile.scala:128:7]
wire [6:0] io_write_ports_1_bits_addr_0 = io_write_ports_1_bits_addr; // @[regfile.scala:128:7]
wire [64:0] io_write_ports_1_bits_data_0 = io_write_ports_1_bits_data; // @[regfile.scala:128:7]
wire io_arb_read_reqs_0_ready = 1'h1; // @[regfile.scala:128:7]
wire io_arb_read_reqs_1_ready = 1'h1; // @[regfile.scala:128:7]
wire _io_arb_read_reqs_0_ready_T = 1'h1; // @[regfile.scala:167:82]
wire _io_arb_read_reqs_1_ready_T = 1'h1; // @[regfile.scala:167:82]
wire [3:0] _data_sels_0_T_2 = 4'h4; // @[OneHot.scala:58:35]
wire [3:0] _data_sels_1_T_2 = 4'h4; // @[OneHot.scala:58:35]
wire [3:0] _data_sels_2_T_2 = 4'h4; // @[OneHot.scala:58:35]
wire [1:0] _data_sels_0_T_1 = 2'h2; // @[OneHot.scala:58:35]
wire [1:0] _data_sels_1_T_1 = 2'h2; // @[OneHot.scala:58:35]
wire [1:0] _data_sels_2_T_1 = 2'h2; // @[OneHot.scala:58:35]
wire [1:0] _data_sels_0_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _data_sels_1_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _data_sels_2_T = 2'h1; // @[OneHot.scala:58:35]
wire issue_read = io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :155:32]
wire use_port = io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :156:30]
wire _io_arb_read_reqs_2_ready_T_2; // @[regfile.scala:167:82]
wire [64:0] _io_rrd_read_resps_0_WIRE; // @[Mux.scala:30:73]
wire [64:0] _io_rrd_read_resps_1_WIRE; // @[Mux.scala:30:73]
wire [64:0] _io_rrd_read_resps_2_WIRE; // @[Mux.scala:30:73]
wire io_arb_read_reqs_2_ready_0; // @[regfile.scala:128:7]
wire [64:0] io_rrd_read_resps_0_0; // @[regfile.scala:128:7]
wire [64:0] io_rrd_read_resps_1_0; // @[regfile.scala:128:7]
wire [64:0] io_rrd_read_resps_2_0; // @[regfile.scala:128:7]
wire [2:0] data_sels_0; // @[regfile.scala:149:25]
wire [2:0] data_sels_1; // @[regfile.scala:149:25]
wire [2:0] data_sels_2; // @[regfile.scala:149:25]
wire issue_read_1; // @[regfile.scala:155:32]
wire use_port_1; // @[regfile.scala:156:30]
wire _T_21 = ~issue_read & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,45}]
assign issue_read_1 = _T_21; // @[regfile.scala:155:32, :157:45]
assign use_port_1 = _T_21; // @[regfile.scala:156:30, :157:45]
wire _T_27 = issue_read_1 | issue_read; // @[regfile.scala:155:32, :165:32]
wire issue_read_2; // @[regfile.scala:155:32]
wire use_port_2; // @[regfile.scala:156:30]
wire _T_31 = ~_T_27 & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :157:{13,45}, :165:32]
assign issue_read_2 = _T_31; // @[regfile.scala:155:32, :157:45]
assign use_port_2 = _T_31; // @[regfile.scala:156:30, :157:45]
assign data_sels_0 = _T_31 ? 3'h4 : _T_21 ? 3'h2 : 3'h1; // @[regfile.scala:149:25, :157:{45,75}, :160:22]
wire issue_read_3; // @[regfile.scala:155:32]
wire use_port_3; // @[regfile.scala:156:30]
wire _T_48 = ~use_port & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :156:30, :157:{29,45}]
assign issue_read_3 = _T_48; // @[regfile.scala:155:32, :157:45]
assign use_port_3 = _T_48; // @[regfile.scala:156:30, :157:45]
wire _T_49 = use_port_3 | use_port; // @[regfile.scala:156:30, :163:33]
wire issue_read_4; // @[regfile.scala:155:32]
wire use_port_4; // @[regfile.scala:156:30]
wire _T_58 = ~issue_read_3 & ~use_port_1 & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :155:32, :156:30, :157:{13,26,29,45}]
assign issue_read_4 = _T_58; // @[regfile.scala:155:32, :157:{26,45}]
assign use_port_4 = _T_58; // @[regfile.scala:156:30, :157:{26,45}]
wire _T_59 = use_port_4 | use_port_1; // @[regfile.scala:156:30, :163:33]
wire _T_64 = issue_read_4 | issue_read_3; // @[regfile.scala:155:32, :165:32]
wire issue_read_5; // @[regfile.scala:155:32]
wire use_port_5; // @[regfile.scala:156:30]
wire _T_68 = ~_T_64 & ~use_port_2 & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :156:30, :157:{13,26,29,45}, :165:32]
assign issue_read_5 = _T_68; // @[regfile.scala:155:32, :157:{26,45}]
assign use_port_5 = _T_68; // @[regfile.scala:156:30, :157:{26,45}]
assign data_sels_1 = _T_68 ? 3'h4 : _T_58 ? 3'h2 : 3'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22]
wire _T_69 = use_port_5 | use_port_2; // @[regfile.scala:156:30, :163:33]
wire issue_read_6; // @[regfile.scala:155:32]
wire use_port_6; // @[regfile.scala:156:30]
wire _T_85 = ~_T_49 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33]
assign issue_read_6 = _T_85; // @[regfile.scala:155:32, :157:45]
assign use_port_6 = _T_85; // @[regfile.scala:156:30, :157:45]
wire issue_read_7; // @[regfile.scala:155:32]
wire use_port_7; // @[regfile.scala:156:30]
wire _T_95 = ~issue_read_6 & ~_T_59 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33]
assign issue_read_7 = _T_95; // @[regfile.scala:155:32, :157:{26,45}]
assign use_port_7 = _T_95; // @[regfile.scala:156:30, :157:{26,45}]
wire _T_101 = issue_read_7 | issue_read_6; // @[regfile.scala:155:32, :165:32]
wire issue_read_8; // @[regfile.scala:155:32]
wire use_port_8; // @[regfile.scala:156:30]
wire _T_105 = ~_T_101 & ~_T_69 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32]
assign issue_read_8 = _T_105; // @[regfile.scala:155:32, :157:{26,45}]
assign use_port_8 = _T_105; // @[regfile.scala:156:30, :157:{26,45}]
assign data_sels_2 = _T_105 ? 3'h4 : _T_95 ? 3'h2 : 3'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22]
wire [1:0] _io_arb_read_reqs_2_ready_T = {1'h0, io_arb_read_reqs_0_valid_0} + {1'h0, io_arb_read_reqs_1_valid_0}; // @[regfile.scala:128:7, :167:42]
wire [1:0] _io_arb_read_reqs_2_ready_T_1 = _io_arb_read_reqs_2_ready_T; // @[regfile.scala:167:42]
assign _io_arb_read_reqs_2_ready_T_2 = _io_arb_read_reqs_2_ready_T_1 != 2'h3; // @[regfile.scala:167:{42,82}]
assign io_arb_read_reqs_2_ready_0 = _io_arb_read_reqs_2_ready_T_2; // @[regfile.scala:128:7, :167:82] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
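// A minimal usage sketch (hypothetical module, not part of the original file): delay a Bool by
// two cycles with ShiftRegInit, resetting both stages to false.B and naming them "dly_0"/"dly_1".
class ShiftRegInitSketch extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("dly"))
}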
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
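// A minimal usage sketch (hypothetical module, not part of the original file): bring an
// asynchronous level into the local clock domain through a 3-deep, async-reset synchronizer
// chain that initializes to 0.
class AsyncSyncSketch extends Module {
  val io = IO(new Bundle {
    val async_d = Input(Bool())
    val sync_q = Output(Bool())
  })
  io.sync_q := AsyncResetSynchronizerShiftReg(io.async_d, sync = 3, init = 0)
}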
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
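// A minimal usage sketch (hypothetical module, not part of the original file): capture an 8-bit
// value with ClockCrossingReg under an enable, without a reset value (doInit = false).
class ClockCrossingRegSketch extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val en = Input(Bool())
    val q = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("cdc_capture"))
}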
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_238( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0;
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RegisterFile.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile.{CoreModule}
import freechips.rocketchip.util._
import saturn.common._
class OldestRRArbiter(val n: Int)(implicit p: Parameters) extends Module {
val io = IO(new ArbiterIO(new VectorReadReq, n))
val arb = Module(new RRArbiter(new VectorReadReq, n))
io <> arb.io
val oldest_oh = io.in.map(i => i.valid && i.bits.oldest)
//assert(PopCount(oldest_oh) <= 1.U)
when (oldest_oh.orR) {
io.chosen := VecInit(oldest_oh).asUInt
io.out.valid := true.B
io.out.bits := Mux1H(oldest_oh, io.in.map(_.bits))
for (i <- 0 until n) {
io.in(i).ready := oldest_oh(i) && io.out.ready
}
}
}
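// A minimal pure-Scala sketch (hypothetical, not part of the original file) of the selection
// policy above: any valid input flagged `oldest` preempts the round-robin arbiter's choice.
object OldestWinsSketch {
  def choose(valid: Seq[Boolean], oldest: Seq[Boolean], rrChoice: Int): Int = {
    val oldestIdx = valid.zip(oldest).indexWhere { case (v, o) => v && o }
    if (oldestIdx >= 0) oldestIdx else rrChoice
  }
  def main(args: Array[String]): Unit =
    println(choose(Seq(true, true, true), Seq(false, true, false), rrChoice = 2)) // prints 1
}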
class RegisterReadXbar(n: Int, banks: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val in = Vec(n, Flipped(new VectorReadIO))
val out = Vec(banks, new VectorReadIO)
})
val arbs = Seq.fill(banks) { Module(new OldestRRArbiter(n)) }
for (i <- 0 until banks) {
io.out(i).req <> arbs(i).io.out
}
val bankOffset = log2Ceil(banks)
for (i <- 0 until n) {
val bank_sel = if (bankOffset == 0) true.B else UIntToOH(io.in(i).req.bits.eg(bankOffset-1,0))
for (j <- 0 until banks) {
arbs(j).io.in(i).valid := io.in(i).req.valid && bank_sel(j)
arbs(j).io.in(i).bits.eg := io.in(i).req.bits.eg >> bankOffset
arbs(j).io.in(i).bits.oldest := io.in(i).req.bits.oldest
}
io.in(i).req.ready := Mux1H(bank_sel, arbs.map(_.io.in(i).ready))
io.in(i).resp := Mux1H(bank_sel, io.out.map(_.resp))
}
}
class RegisterFileBank(reads: Int, maskReads: Int, rows: Int, maskRows: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val read = Vec(reads, Flipped(new VectorReadIO))
val mask_read = Vec(maskReads, Flipped(new VectorReadIO))
val write = Input(Valid(new VectorWrite(dLen)))
val ll_write = Flipped(Decoupled(new VectorWrite(dLen)))
})
val ll_write_valid = RegInit(false.B)
val ll_write_bits = Reg(new VectorWrite(dLen))
val vrf = Mem(rows, Vec(dLen, Bool()))
val v0_mask = Mem(maskRows, Vec(dLen, Bool()))
for (read <- io.read) {
read.req.ready := !(ll_write_valid && read.req.bits.eg === ll_write_bits.eg)
read.resp := DontCare
when (read.req.valid) {
read.resp := vrf.read(read.req.bits.eg).asUInt
}
}
for (mask_read <- io.mask_read) {
mask_read.req.ready := !(ll_write_valid && mask_read.req.bits.eg === ll_write_bits.eg)
mask_read.resp := DontCare
when (mask_read.req.valid) {
mask_read.resp := v0_mask.read(mask_read.req.bits.eg).asUInt
}
}
val write = WireInit(io.write)
io.ll_write.ready := false.B
if (vParams.vrfHiccupBuffer) {
when (!io.write.valid) { // drain hiccup buffer
write.valid := ll_write_valid || io.ll_write.valid
write.bits := Mux(ll_write_valid, ll_write_bits, io.ll_write.bits)
ll_write_valid := false.B
when (io.ll_write.valid && ll_write_valid) {
ll_write_valid := true.B
ll_write_bits := io.ll_write.bits
}
io.ll_write.ready := true.B
} .elsewhen (!ll_write_valid) { // fill hiccup buffer
when (io.ll_write.valid) {
ll_write_valid := true.B
ll_write_bits := io.ll_write.bits
}
io.ll_write.ready := true.B
}
} else {
when (!io.write.valid) {
io.ll_write.ready := true.B
write.valid := io.ll_write.valid
write.bits := io.ll_write.bits
}
}
when (write.valid) {
vrf.write(
write.bits.eg,
VecInit(write.bits.data.asBools),
write.bits.mask.asBools)
when (write.bits.eg < maskRows.U) {
v0_mask.write(
write.bits.eg,
VecInit(write.bits.data.asBools),
write.bits.mask.asBools)
}
}
}
class RegisterFile(reads: Seq[Int], maskReads: Seq[Int], pipeWrites: Int, llWrites: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val nBanks = vParams.vrfBanking
// Support 1, 2, and 4 banks for the VRF
require(nBanks == 1 || nBanks == 2 || nBanks == 4)
val io = IO(new Bundle {
val read = MixedVec(reads.map(rc => Vec(rc, Flipped(new VectorReadIO))))
val mask_read = MixedVec(maskReads.map(rc => Vec(rc, Flipped(new VectorReadIO))))
val pipe_writes = Vec(pipeWrites, Input(Valid(new VectorWrite(dLen))))
val ll_writes = Vec(llWrites, Flipped(Decoupled(new VectorWrite(dLen))))
})
val vrf = Seq.fill(nBanks) { Module(new RegisterFileBank(reads.size, maskReads.size, egsTotal/nBanks, if (egsPerVReg < nBanks) 1 else egsPerVReg / nBanks)) }
reads.zipWithIndex.foreach { case (rc, i) =>
val xbar = Module(new RegisterReadXbar(rc, nBanks))
vrf.zipWithIndex.foreach { case (bank, j) =>
bank.io.read(i) <> xbar.io.out(j)
}
xbar.io.in <> io.read(i)
}
maskReads.zipWithIndex.foreach { case (rc, i) =>
val mask_xbar = Module(new RegisterReadXbar(rc, nBanks))
vrf.zipWithIndex.foreach { case (bank, j) =>
bank.io.mask_read(i) <> mask_xbar.io.out(j)
}
mask_xbar.io.in <> io.mask_read(i)
}
io.ll_writes.foreach(_.ready := false.B)
vrf.zipWithIndex.foreach { case (rf, i) =>
val bank_match = io.pipe_writes.map { w => (w.bits.bankId === i.U) && w.valid }
val bank_write_data = Mux1H(bank_match, io.pipe_writes.map(_.bits.data))
val bank_write_mask = Mux1H(bank_match, io.pipe_writes.map(_.bits.mask))
val bank_write_eg = Mux1H(bank_match, io.pipe_writes.map(_.bits.eg))
val bank_write_valid = bank_match.orR
rf.io.write.valid := bank_write_valid
rf.io.write.bits.data := bank_write_data
rf.io.write.bits.mask := bank_write_mask
rf.io.write.bits.eg := bank_write_eg >> vrfBankBits
when (bank_write_valid) { PopCount(bank_match) === 1.U }
val ll_arb = Module(new Arbiter(new VectorWrite(dLen), llWrites))
rf.io.ll_write <> ll_arb.io.out
io.ll_writes.zipWithIndex.foreach { case (w, j) =>
ll_arb.io.in(j).valid := w.valid && w.bits.bankId === i.U
ll_arb.io.in(j).bits.eg := w.bits.eg >> vrfBankBits
ll_arb.io.in(j).bits.data := w.bits.data
ll_arb.io.in(j).bits.mask := w.bits.mask
when (ll_arb.io.in(j).ready && w.bits.bankId === i.U) {
w.ready := true.B
}
}
}
}
| module OldestRRArbiter_12( // @[RegisterFile.scala:10:7]
input clock, // @[RegisterFile.scala:10:7]
output io_in_0_ready, // @[RegisterFile.scala:11:14]
input io_in_0_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_0_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_0_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_1_ready, // @[RegisterFile.scala:11:14]
input io_in_1_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_1_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_1_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_2_ready, // @[RegisterFile.scala:11:14]
input io_in_2_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_2_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_2_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_3_ready, // @[RegisterFile.scala:11:14]
input io_in_3_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_3_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_3_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_4_ready, // @[RegisterFile.scala:11:14]
input io_in_4_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_4_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_4_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_5_ready, // @[RegisterFile.scala:11:14]
input io_in_5_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_5_bits_eg, // @[RegisterFile.scala:11:14]
input io_out_ready, // @[RegisterFile.scala:11:14]
output [5:0] io_out_bits_eg // @[RegisterFile.scala:11:14]
);
wire _arb_io_in_0_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_1_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_2_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_3_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_4_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_5_ready; // @[RegisterFile.scala:13:19]
wire [5:0] _arb_io_out_bits_eg; // @[RegisterFile.scala:13:19]
wire oldest_oh_0 = io_in_0_valid & io_in_0_bits_oldest; // @[RegisterFile.scala:15:42]
wire oldest_oh_1 = io_in_1_valid & io_in_1_bits_oldest; // @[RegisterFile.scala:15:42]
wire oldest_oh_2 = io_in_2_valid & io_in_2_bits_oldest; // @[RegisterFile.scala:15:42]
wire oldest_oh_3 = io_in_3_valid & io_in_3_bits_oldest; // @[RegisterFile.scala:15:42]
wire oldest_oh_4 = io_in_4_valid & io_in_4_bits_oldest; // @[RegisterFile.scala:15:42]
wire _GEN = oldest_oh_0 | oldest_oh_1 | oldest_oh_2 | oldest_oh_3 | oldest_oh_4; // @[RegisterFile.scala:15:42]
RRArbiter_12 arb ( // @[RegisterFile.scala:13:19]
.clock (clock),
.io_in_0_ready (_arb_io_in_0_ready),
.io_in_0_valid (io_in_0_valid),
.io_in_0_bits_eg (io_in_0_bits_eg),
.io_in_1_ready (_arb_io_in_1_ready),
.io_in_1_valid (io_in_1_valid),
.io_in_1_bits_eg (io_in_1_bits_eg),
.io_in_2_ready (_arb_io_in_2_ready),
.io_in_2_valid (io_in_2_valid),
.io_in_2_bits_eg (io_in_2_bits_eg),
.io_in_3_ready (_arb_io_in_3_ready),
.io_in_3_valid (io_in_3_valid),
.io_in_3_bits_eg (io_in_3_bits_eg),
.io_in_4_ready (_arb_io_in_4_ready),
.io_in_4_valid (io_in_4_valid),
.io_in_4_bits_eg (io_in_4_bits_eg),
.io_in_5_ready (_arb_io_in_5_ready),
.io_in_5_valid (io_in_5_valid),
.io_in_5_bits_eg (io_in_5_bits_eg),
.io_out_ready (io_out_ready),
.io_out_bits_eg (_arb_io_out_bits_eg)
); // @[RegisterFile.scala:13:19]
assign io_in_0_ready = _GEN ? oldest_oh_0 & io_out_ready : _arb_io_in_0_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_1_ready = _GEN ? oldest_oh_1 & io_out_ready : _arb_io_in_1_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_2_ready = _GEN ? oldest_oh_2 & io_out_ready : _arb_io_in_2_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_3_ready = _GEN ? oldest_oh_3 & io_out_ready : _arb_io_in_3_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_4_ready = _GEN ? oldest_oh_4 & io_out_ready : _arb_io_in_4_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_5_ready = ~_GEN & _arb_io_in_5_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :17:24, :22:22]
assign io_out_bits_eg = _GEN ? (oldest_oh_0 ? io_in_0_bits_eg : 6'h0) | (oldest_oh_1 ? io_in_1_bits_eg : 6'h0) | (oldest_oh_2 ? io_in_2_bits_eg : 6'h0) | (oldest_oh_3 ? io_in_3_bits_eg : 6'h0) | (oldest_oh_4 ? io_in_4_bits_eg : 6'h0) : _arb_io_out_bits_eg; // @[Mux.scala:30:73]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
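  // A minimal pure-Scala sketch (hypothetical, not part of the original file) of the same
  // visibility rule on plain integers: the address passes if every client that owns this source
  // ID has at least one visibility region containing it.
  // Example: visibleSketch(0x120, 3, Seq((0 until 4, Seq(0x100 until 0x200)))) == true
  private def visibleSketch(address: Int, source: Int, clients: Seq[(Range, Seq[Range])]): Boolean =
    clients.map { case (sourceIds, visibility) =>
      !sourceIds.contains(source) || visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)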
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID for which a response message " +
      "is already pending (not received until the current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
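    // Illustrative sketch of the bookkeeping encoding (assuming edge.bundle.sizeBits == 4, so
    // a_size_bus_size == 5, log_a_size_bus_size == 3, and log_a_opcode_bus_size == 2):
    //   a Get (opcode 4) of size 3 issued from source 2 is recorded as
    //     a_opcodes_set_interm = (4 << 1) | 1 = 9   // the LSB marks the entry as valid
    //     a_sizes_set_interm   = (3 << 1) | 1 = 7
    //   and shifted into the per-source field at bit offset (source << log_*_bus_size);
    //   the matching D beat recovers them through a_opcode_lookup / a_size_lookup, whose
    //   trailing ">> 1" strips the valid bit. size_to_numfullbits(3.U) = (1.U << 3) - 1.U = "b111".U.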
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
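// A minimal usage sketch (hypothetical module, for illustration): gate a transfer on several
// ready/valid terms, excluding the signal being driven to avoid a combinational loop. Note that
// fire(exclude) matches by reference, so the same Bool instances passed to the constructor must
// be used for exclusion.
//
//   class PassThrough extends Module {
//     val io = IO(new Bundle {
//       val in  = Flipped(Decoupled(UInt(8.W)))
//       val out = Decoupled(UInt(8.W))
//     })
//     val xfer = DecoupledHelper(io.in.valid, io.out.ready)
//     io.in.ready  := xfer.fire(io.in.valid)
//     io.out.valid := xfer.fire(io.out.ready)
//     io.out.bits  := io.in.bits
//   }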
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
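// Illustrative use (hypothetical names): select a pair of outputs with one cascade of MuxTs
// keyed on an opcode, falling back to the default when no key matches.
//   val (beats, isWrite) = MuxTLookup(op, (0.U, false.B), Seq(
//     1.U -> (4.U, false.B),
//     2.U -> (8.U, true.B)))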
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// e.g.: (0x3, 0, 4) => 1000, (0x3, 1, 4) => 1100, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
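// Illustrative use (hypothetical names): byte-lane mask for an aligned access on an 8-byte
// beat, with mask bit i corresponding to byte lane i.
//   val mask = MaskGen(addr(2, 0), lgSize, beatBytes = 8)
//   // addr_lo = 0x4, lgSize = 2 (a 4-byte access) => "b11110000": lanes 4..7 selected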
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
   * initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
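// Illustrative use (hypothetical names): the TLMonitor above reads its timeout with
// PlusArg("tilelink_timeout", ...); PlusArg.timeout wraps the same reader in an assert.
//   val limit = PlusArg("my_limit", default = 0, docstring = "Example threshold")
//   PlusArg.timeout("my_timeout", docstring = "Example watchdog")(cycleCounter)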
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
  /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
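  // Worked examples (illustrative), assuming the stated preconditions (x < n, y < n) hold:
  //   5.U(3.W).addWrap(6.U, 8)      // == 3.U : power-of-2 path keeps the low 3 bits of 5 +& 6
  //   1.U(3.W).subWrap(6.U, 7)      // == 2.U : non-power-of-2 path adds n back on underflow
  //   "b0011".U(4.W).rotateRight(1) // == "b1001".U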
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
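  // Worked bit examples (illustrative): "OH1" is a one-hot-minus-one (thermometer) code.
  //   UIntToOH1(2.U, 4)  == "b0011".U // the low x bits set
  //   OH1ToOH("b0011".U) == "b0100".U // the next power-of-two bit
  //   OH1ToUInt("b0011".U) == 2.U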
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
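  // Worked bit examples (illustrative):
  //   leftOR("b00100".U)  == "b11100".U // each 1 is smeared toward the MSB
  //   rightOR("b00100".U) == "b00111".U // each 1 is smeared toward the LSB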
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
  case object IDEMPOTENT extends T // gets return the most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
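// Example (illustrative): IdRange(4, 8) covers source ids 4, 5, 6 and 7.
//   IdRange(4, 8).contains(5)              // true
//   IdRange(4, 8).overlaps(IdRange(6, 10)) // true  (ids 6 and 7 are shared)
//   IdRange(4, 8).contains(IdRange(5, 7))  // true  ([5, 7) lies inside [4, 8))
// In hardware, contains(x: UInt) for [4, 8) reduces to the prefix check
// (x >> 2) === 1.U, because ids 4..7 differ only in their two low bits.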
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
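// Example (illustrative): TransferSizes(4, 64) accepts any power-of-2 size from 4 to 64 bytes.
//   TransferSizes(4, 64).contains(16)                      // true
//   TransferSizes(4, 64).contains(12)                      // false (not a power of 2)
//   TransferSizes(4, 64).intersect(TransferSizes(16, 256)) // TransferSizes(16, 64)
//   TransferSizes(4, 8).mincover(TransferSizes(32, 64))    // TransferSizes(4, 64), which also
//                                                          // admits 16, hence "not a union"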
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
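// Example (illustrative): AddressSet(0x200, 0xff).contains(0x2a4) is true, while
// AddressSet(0x200, 0xff).contains(0x300) is false. Carving a hole out of a region:
//   AddressSet(0x0, 0xff).subtract(AddressSet(0x0, 0xf))
//     == Seq(AddressSet(0x10, 0xf), AddressSet(0x20, 0x1f), AddressSet(0x40, 0x3f), AddressSet(0x80, 0x7f))
// and AddressSet.unify(Seq(AddressSet(0x0, 0xf), AddressSet(0x10, 0xf))) merges the two
// adjacent, equally sized regions into Seq(AddressSet(0x0, 0x1f)).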
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
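  // Worked example (illustrative): with beatBytes = 8, a PutFullData of size = 5 (32 bytes)
  // has numBeats1 = 0b11111 >> 3 = 3, i.e. a 4-beat burst: 'first' is high on beat 0,
  // 'last' on beat 3, and 'count' steps 0, 1, 2, 3. A Get of the same size carries no data
  // on channel A, so numBeats1 = 0 and its single beat is both first and last.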
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_20( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [13:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [13:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [519:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire [127:0] _GEN_0 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [127:0] _GEN_3 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
reg [519:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
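    // Example (illustrative), with n = 7: 5.U.addWrap(4.U, 7) is 2 (9 wraps past 7) and
    // 2.U.subWrap(5.U, 7) is 4 (-3 mod 7). When n is a power of 2, both reduce to a simple
    // truncation to the low log2(n) bits.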
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
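  // Example (illustrative): UIntToOH1(3.U, 8) = 0b00000111, a thermometer mask equal to
  // 2^x - 1 (used e.g. by TLEdge.isAligned); OH1ToOH of 0b00000111 gives the true one-hot
  // 0b00001000, and OH1ToUInt recovers the original 3.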
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
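  // Example (illustrative): for the 5-bit value 0b00100, leftOR gives 0b11100 (each set bit
  // smeared toward the MSB) and rightOR gives 0b00111 (smeared toward the LSB).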
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_EntryData_48( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_0 = io_x_ae; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae = io_x_ae_0; // @[package.scala:267:30]
wire io_y_sw = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr = io_x_sr_0; // @[package.scala:267:30]
wire io_y_pw = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr = io_x_pr_0; // @[package.scala:267:30]
wire io_y_pal = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
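  // Ingress flits flow through two stages: route_buffer holds flits while the route computer
  // is queried for each packet's head flit (route_q captures that response, or a locally
  // generated one when the packet already terminates at this node), and vcalloc_buffer holds
  // flits while a virtual channel is allocated (vcalloc_q captures the VC allocation response).
  // Switch allocation then fires once the chosen output VC has credits and the unit is not blocked.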
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_41( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [36:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [36:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [4:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [36:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 5'hE; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 5'hF; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 5'h10; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 5'h11; // @[IngressUnit.scala:30:72]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_10 = {1'h0, (_route_buffer_io_enq_bits_flow_egress_node_id_T ? 3'h5 : 3'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_1 ? 3'h6 : 3'h0)} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_2 ? 4'h9 : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_3 ? 4'hA : 4'h0); // @[Mux.scala:30:73]
wire _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_10 == 4'h8; // @[Mux.scala:30:73]
wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_10 != 4'h8; // @[Mux.scala:30:73]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
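// A minimal usage sketch (my own addition, not part of gemmini): wiring a MacUnit with SInt
// operands inside a parent module, relying on the implicit SIntArithmetic instance defined in
// Arithmetic.scala. The widths mirror the generated MacUnit_72 later in this document (8-bit
// inputs, 32-bit addend, 20-bit result); the module and signal names are illustrative.
class MacUnitUser extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))
    val b = Input(SInt(8.W))
    val c = Input(SInt(32.W))
    val d = Output(SInt(20.W))
  })
  val mac = Module(new MacUnit(SInt(8.W), SInt(32.W), SInt(20.W)))
  mac.io.in_a := io.a
  mac.io.in_b := io.b
  mac.io.in_c := io.c
  io.d := mac.io.out_d // d = a * b + c (the truncation to 20 bits happens inside MacUnit, as in MacUnit_72)
}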
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
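// A minimal elaboration sketch (my own addition, not from gemmini): emitting Verilog for a single
// PE with 8-bit inputs, 20-bit outputs and a 32-bit accumulator that supports both dataflows.
// `Dataflow` is the gemmini enumeration referenced above (OS / WS / BOTH); the ChiselStage entry
// point shown here may differ depending on the Chisel version in use.
object ElaboratePE extends App {
  circt.stage.ChiselStage.emitSystemVerilog(
    new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH, max_simultaneous_matmuls = 1)
  )
}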
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
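// A toy sketch (my own addition) of the pattern described at the top of this file: providing an
// Arithmetic instance for another Chisel type. Bool is used purely for illustration (gemmini does
// not define this instance): * is AND, + is OR, and the width-related operations are no-ops. In
// real use the instance would be marked `implicit` and live inside an enclosing object, like the
// instances in `object Arithmetic` below, so that it can be found or imported.
object BoolArithmeticSketch extends Arithmetic[Bool] {
  override implicit def cast(self: Bool) = new ArithmeticOps(self) {
    override def *(t: Bool) = self && t
    override def mac(m1: Bool, m2: Bool) = (m1 && m2) || self
    override def +(t: Bool) = self || t
    override def -(t: Bool) = self ^ t
    override def >>(u: UInt) = self
    override def >(t: Bool): Bool = self && !t
    override def identity: Bool = true.B
    override def withWidthOf(t: Bool) = self
    override def clippedToWidthOf(t: Bool) = self
    override def relu: Bool = self
    override def zero: Bool = false.B
    override def minimum: Bool = false.B
  }
}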
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
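      // Worked example (my own annotation) of the rounding shift above: with self = "b1011".U (11)
      // and u = 2, point_five = self(1) = 1, zeros = ((self & "b1".U) =/= 0.U) = true and
      // ones_digit = self(2) = 0, so r = 1 and the result is (11 >> 2) + 1 = 3 rather than the
      // truncated value 2 (11 / 4 = 2.75 rounds to 3).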
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating-point square-root unit, but we should use an integer one instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
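  // A usage sketch (my own addition, not part of gemmini) for the optional divider interface defined
  // in SIntArithmetic above: `divider` returns a Decoupled input carrying the unsigned denominator
  // and a Decoupled output carrying self / denom. The wrapper module, widths and signal names here
  // are illustrative only.
  class SIntDividerSketch extends Module {
    import SIntArithmetic._ // brings the implicit `cast` conversion into scope (cf. `import ev._` in PE.scala)
    val io = IO(new Bundle {
      val denom = Flipped(Decoupled(UInt(16.W)))
      val quot = Decoupled(SInt(32.W))
    })
    val numerator = RegInit(100.S(32.W))
    val (denomIn, quotOut) = numerator.divider(denom_t = UInt(16.W)).get
    denomIn.valid := io.denom.valid
    denomIn.bits := io.denom.bits
    io.denom.ready := denomIn.ready
    io.quot.valid := quotOut.valid
    io.quot.bits := quotOut.bits
    quotOut.ready := io.quot.ready // results must be accepted immediately (see the assert in divider)
  }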
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
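  // Worked example (my own annotation) of the Float >> implementation above: for
  // Float(expWidth = 8, sigWidth = 24) the bias is (1 << 7) - 1 = 127, so shifting right by u = 3
  // builds a constant with exponent field 127 - 3 = 124 and an all-zero significand, i.e.
  // 2^(-3) = 0.125, and the "right shift" is then performed as a rounded multiplication by that
  // constant.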
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_72( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
        // @todo <> in chisel3 makes a directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
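/** A minimal usage sketch (not part of the original file): a [[LazyModule]] whose hardware is
  * described by a [[LazyModuleImp]]. The adder logic and port names are illustrative only; the
  * wrapper would be created with `LazyModule(new ExampleLazyAdder)` during the first (lazy) phase,
  * and its `.module` would be instantiated during the second phase.
  */
class ExampleLazyAdder(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val io = IO(new Bundle {
      val a = Input(UInt(8.W))
      val b = Input(UInt(8.W))
      val sum = Output(UInt(8.W))
    })
    io.sum := io.a + io.b
  }
}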
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
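/** A minimal usage sketch (not part of the original file): a [[LazyModule]] implemented with a
  * [[LazyRawModuleImp]] that derives its children's implicit clock and reset from explicit ports by
  * driving [[LazyRawModuleImp.childClock]] and [[LazyRawModuleImp.childReset]]. The port names are
  * illustrative only.
  */
class ExampleRawClockDomain(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(Bool()))
    childClock := clockIn
    childReset := resetIn
  }
}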
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  * It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
  /** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Unconnected forwarded diplomatic signals are currently driven to DontCare for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Unconnected forwarded diplomatic signals are currently driven to DontCare for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors for the results of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors for the results of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
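// Example use from a parent LazyModuleImp (the node name `myNode` is hypothetical):
//   val (bundle, edge) = myNode.in(0) // hardware Bundle and negotiated edge of input port 0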
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
class IntXbar()(implicit p: Parameters) extends LazyModule
{
val intnode = new IntNexusNode(
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
sourceFn = { seq =>
IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map {
case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
}.flatten)
})
{
override def circuitIdentity = outputs == 1 && inputs == 1
}
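// The sourceFn above offsets each input's interrupt sources by the running sum of the
// earlier inputs' counts: e.g. for two inputs with 1 and 2 interrupts, the offsets are
// Seq(0, 1), so they appear on outputs 0, 1 and 2 (as in the generated IntXbar_i2_o1
// module shown after this file).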
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
override def desiredName = s"IntXbar_i${intnode.in.size}_o${intnode.out.size}"
val cat = intnode.in.map { case (i, e) => i.take(e.source.num) }.flatten
intnode.out.foreach { case (o, _) => o := cat }
}
}
class IntSyncXbar()(implicit p: Parameters) extends LazyModule
{
val intnode = new IntSyncNexusNode(
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
sourceFn = { seq =>
IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map {
case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
}.flatten)
})
{
override def circuitIdentity = outputs == 1 && inputs == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncXbar_i${intnode.in.size}_o${intnode.out.size}"
val cat = intnode.in.map { case (i, e) => i.sync.take(e.source.num) }.flatten
intnode.out.foreach { case (o, _) => o.sync := cat }
}
}
object IntXbar {
def apply()(implicit p: Parameters): IntNode = {
val xbar = LazyModule(new IntXbar)
xbar.intnode
}
}
object IntSyncXbar {
def apply()(implicit p: Parameters): IntSyncNode = {
val xbar = LazyModule(new IntSyncXbar)
xbar.intnode
}
}
| module IntXbar_i2_o1( // @[Xbar.scala:22:9]
input auto_anon_in_1_0, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_1, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_0, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_2 // @[LazyModuleImp.scala:107:25]
);
wire auto_anon_in_1_0_0 = auto_anon_in_1_0; // @[Xbar.scala:22:9]
wire auto_anon_in_1_1_0 = auto_anon_in_1_1; // @[Xbar.scala:22:9]
wire auto_anon_in_0_0_0 = auto_anon_in_0_0; // @[Xbar.scala:22:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire anonIn_1_0 = auto_anon_in_1_0_0; // @[Xbar.scala:22:9]
wire anonIn_1_1 = auto_anon_in_1_1_0; // @[Xbar.scala:22:9]
wire anonIn_0 = auto_anon_in_0_0_0; // @[Xbar.scala:22:9]
wire anonOut_0; // @[MixedNode.scala:542:17]
wire anonOut_1; // @[MixedNode.scala:542:17]
wire anonOut_2; // @[MixedNode.scala:542:17]
wire auto_anon_out_0_0; // @[Xbar.scala:22:9]
wire auto_anon_out_1_0; // @[Xbar.scala:22:9]
wire auto_anon_out_2_0; // @[Xbar.scala:22:9]
assign anonOut_0 = anonIn_0; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_1 = anonIn_1_0; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_2 = anonIn_1_1; // @[MixedNode.scala:542:17, :551:17]
assign auto_anon_out_0_0 = anonOut_0; // @[Xbar.scala:22:9]
assign auto_anon_out_1_0 = anonOut_1; // @[Xbar.scala:22:9]
assign auto_anon_out_2_0 = anonOut_2; // @[Xbar.scala:22:9]
assign auto_anon_out_0 = auto_anon_out_0_0; // @[Xbar.scala:22:9]
assign auto_anon_out_1 = auto_anon_out_1_0; // @[Xbar.scala:22:9]
assign auto_anon_out_2 = auto_anon_out_2_0; // @[Xbar.scala:22:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
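// For instance, the generated MacUnit module accompanying these files was elaborated
// with 8-bit signed inputs and a 20-bit c/d type, so it computes an 8x8 multiply
// accumulated into a 20-bit sum.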
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, breaking ties towards the even result (see the implementations below)
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
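// Worked example with u = 1: 7 >> 1 rounds 3.5 up to 4, while 5 >> 1 rounds 2.5 down
// to 2, since ties are broken towards the result with an even ones digit.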
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point square-root unit, but we should use an integer one instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
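// For example, with an 8-bit exponent field (bias = 127) and u = 3, shift_exp is 124,
// so shift_fn encodes 2^-3 = 0.125 and the multiply below scales self by that factor.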
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_215( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
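// Data enters at chain.last and shifts towards chain.head, so for sync = 3 the input
// reaches io.q after three clock edges: chain(2) -> chain(1) -> chain(0).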
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_176( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_312 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RouteComputer.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.experimental.decode.{TruthTable, decoder}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.DecodeLogic
import constellation.channel._
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo}
import constellation.noc.{HasNoCParams}
class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams {
val src_virt_id = UInt(virtualChannelBits.W)
val flow = new FlowRoutingBundle
}
class RouteComputerResp(
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class RouteComputer(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) })
val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) })
})
(io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) =>
req.ready := true.B
if (outParams.size == 0) {
assert(!req.valid)
resp.vc_sel := DontCare
} else {
def toUInt(t: (Int, FlowRoutingInfo)): UInt = {
val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId
val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode
val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId
val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode
val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId
l6.U(req.bits.getWidth.W)
}
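// toUInt packs the (virtual channel, flow) key with the virtual-channel id in the high
// bits and egress_node_id in the low bits, mirroring the field layout of req.bits.asUInt
// so that the decoder address below lines up with the truth-table keys.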
val flow = req.bits.flow
val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI =>
allInParams(i).channelRoutingInfos.map { cI =>
var row: String = "b"
(0 until nOutputs).foreach { o =>
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0")
}
}
((cI.vc, pI), row)
}
}.flatten
val addr = req.bits.asUInt
val width = outParams.map(_.nVirtualChannels).reduce(_+_)
val decoded = if (table.size > 0) {
val truthTable = TruthTable(
table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) },
BitPat("b" + "?" * width)
)
Reverse(decoder(addr, truthTable))
} else {
0.U(width.W)
}
var idx = 0
(0 until nAllOutputs).foreach { o =>
if (o < nOutputs) {
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
resp.vc_sel(o)(outVId) := decoded(idx)
idx += 1
}
} else {
resp.vc_sel(o)(0) := false.B
}
}
}
}
}
| module RouteComputer_18( // @[RouteComputer.scala:29:7]
input [1:0] io_req_0_bits_src_virt_id, // @[RouteComputer.scala:40:14]
input io_req_0_bits_flow_vnet_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_0_bits_flow_ingress_node, // @[RouteComputer.scala:40:14]
input io_req_0_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14]
input [3:0] io_req_0_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input io_req_0_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_0_0, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_0_1, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_0_2, // @[RouteComputer.scala:40:14]
output io_resp_0_vc_sel_0_3 // @[RouteComputer.scala:40:14]
);
wire [11:0] decoded_invInputs = ~{io_req_0_bits_src_virt_id[0], io_req_0_bits_flow_vnet_id, io_req_0_bits_flow_ingress_node, io_req_0_bits_flow_ingress_node_id, io_req_0_bits_flow_egress_node, io_req_0_bits_flow_egress_node_id}; // @[pla.scala:78:21]
assign io_resp_0_vc_sel_0_0 = |{&{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[9], decoded_invInputs[10]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node[0], decoded_invInputs[3], decoded_invInputs[9], decoded_invInputs[10]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node[1], decoded_invInputs[3], decoded_invInputs[9], decoded_invInputs[10]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_0_vc_sel_0_1 = &{decoded_invInputs[0], io_req_0_bits_flow_egress_node[3], decoded_invInputs[9], decoded_invInputs[10], io_req_0_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}]
assign io_resp_0_vc_sel_0_2 = |{&{decoded_invInputs[0], io_req_0_bits_flow_egress_node[1], io_req_0_bits_flow_vnet_id}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[3], io_req_0_bits_flow_vnet_id, decoded_invInputs[11]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node[0], io_req_0_bits_flow_egress_node[2], decoded_invInputs[4], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], io_req_0_bits_flow_ingress_node[2], io_req_0_bits_flow_ingress_node[3], io_req_0_bits_flow_vnet_id, decoded_invInputs[11]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign io_resp_0_vc_sel_0_3 = &{decoded_invInputs[0], io_req_0_bits_flow_vnet_id, io_req_0_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
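// For example, AsyncQueueParams(depth = 8, sync = 3) gives bits = 3 and wires = 8;
// with narrow = true only a single data wire crosses the clock domain (wires = 1).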
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
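// Convert the binary count to Gray code, e.g. binary 6 (b110) becomes Gray b101;
// successive values differ in exactly one bit, which makes the counter safe to
// synchronize across clock domains.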
incremented ^ (incremented >> 1)
}
}
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
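// Gray-pointer full check: the queue is full when widx equals ridx with its two
// most-significant bits inverted, and (params.depth | params.depth >> 1).U is exactly
// that two-bit mask (for depth > 1).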
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Ideally we would assert that reset cannot happen while there is stuff in the queue.
// That assertion is impossible to write here: a dequeue can occur on the receiving side,
// after which reset is allowed to happen, but the write side has no way to know that the
// dequeue occurred.
// TODO: write some sort of sanity-check assertion for users
// that flags "don't reset while there is activity"
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity-check assertion for users
// that flags "don't reset while there is activity"
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have a different sync depth than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
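// A minimal usage sketch, assuming hypothetical 'producer'/'consumer' Decoupled
// interfaces and clock/reset signals for the two domains:
//   val bundle = withClockAndReset(srcClock, srcReset) { ToAsyncBundle(producer) }
//   val deq    = withClockAndReset(snkClock, snkReset) { FromAsyncBundle(bundle) }
//   consumer <> deq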
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_170( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_185 io_out_sink_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as the a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with the a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
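// Worked example of the packing below, assuming the 3-bit A-channel opcode encoding:
// an in-flight Get (opcode 4) is stored in its per-source slot of inflight_opcodes as
// (4 << 1) | 1 = 0b1001; the lookup shifts the slot down, masks it out, and drops the
// low "valid" bit, recovering opcode 4. A slot value of 0 means no request is
// outstanding for that source ID.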
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
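// Note: responseMap is indexed by the A-channel opcode and gives the expected D-channel
// response, e.g. Get (4) -> AccessAckData and AcquireBlock (6) -> Grant;
// responseMapSecondOption additionally allows AcquireBlock -> GrantData.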
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
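// A minimal usage sketch, assuming hypothetical signals 'in', 'out' and 'tagFree':
// DecoupledHelper gathers a set of ready/valid-style conditions so each side can ask
// "would we fire if everyone but me agreed?".
//   val helper = DecoupledHelper(in.valid, out.ready, tagFree)
//   in.ready  := helper.fire(in.valid)   // all conditions except our own valid
//   out.valid := helper.fire(out.ready)  // all conditions except the consumer's ready
//   when (helper.fire()) { /* every condition holds: the transfer happens */ }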
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
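// A minimal usage sketch; 'opcode' and the constants are placeholders. MuxTLookup
// returns the tuple mapped to the first matching key, or the default:
//   val (isWrite, bytes) = MuxTLookup(opcode, (false.B, 0.U), Seq(
//     0.U -> (true.B,  4.U),
//     1.U -> (false.B, 8.U)))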
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
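// Worked example: Str("ok") packs the ASCII bytes into the 16-bit literal 0x6F6B, and
// Str(x, 16) renders a live UInt as hexadecimal ASCII digits, which is handy for
// printf-style debug output.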
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
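// Worked example (illustrative comment, not in the original source): Majority is true when a
// strict majority of the inputs is set. Inside a module,
//   Majority("b110".U)  // true.B  (two of three bits set)
//   Majority("b100".U)  // false.B (only one of three bits set)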
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
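// Worked example (illustrative comment, not in the original source): PopCountAtLeast(x, n) asks
// whether at least n bits of x are set, using a cheaper tree than a full PopCount when n <= 2.
//   PopCountAtLeast("b0101".U, 2)  // true.B
//   PopCountAtLeast("b0100".U, 2)  // false.B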
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg (beatBytes=4, result bit i covers byte lane i): (addr=0x3, lgSize=0) => b1000, (0x3, 1) => b1100, (0x3, 2) => b1111
// groupBy applies an interleaved OR reduction; e.g. groupBy=2 reduces b0010 to b01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
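// Hedged usage sketch (illustrative module, not part of the original util code; the class and
// port names are made up). For beatBytes = 4 it reproduces the examples above:
// (addr=0x3, lgSize=0) => b1000, (0x3, 1) => b1100, (0x3, 2) => b1111.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(2.W))   // address bits below the beat boundary
    val lgSize = Input(UInt(2.W))   // log2 of the access size in bytes
    val mask   = Output(UInt(4.W))  // one bit per byte lane
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 4)
}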
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
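// Hedged usage sketch (illustrative module, not part of the original PlusArg code; the names
// here are made up). Reads +max_cycles=<n> at simulation time; the output stays low when the
// plusarg is absent because the reader then returns its default of 0.
class PlusArgExample extends Module {
  val io = IO(new Bundle { val budgetExceeded = Output(Bool()) })
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  // Never use this value to initialize a register: it is assigned in a Verilog initial block.
  val maxCycles = PlusArg("max_cycles", docstring = "cycle budget for this test")
  io.budgetExceeded := maxCycles =/= 0.U && cycles >= maxCycles
}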
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
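  // Worked examples for the UInt helpers above (illustrative comments, not in the original source):
  //   "b1010".U(4.W).sextTo(8)          // b1111_1010 (sign-extend)
  //   "b1010".U(4.W).padTo(8)           // b0000_1010 (zero-extend)
  //   "b1010".U(4.W).rotateRight(1)     // b0101
  //   5.U(3.W).addWrap(6.U(3.W), 8)     // 3.U, i.e. (5 + 6) % 8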
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
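  // Worked example (illustrative comment, not in the original source): on a 4-bit value with
  // only bit 2 set,
  //   leftOR("b0100".U(4.W))   // b1100 (each set bit propagates toward the MSB)
  //   rightOR("b0100".U(4.W))  // b0111 (each set bit propagates toward the LSB)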
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
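  // Worked example (illustrative comment, not in the original source): keys appear in
  // first-encounter order, so the result is deterministic, unlike Seq.groupBy's unordered Map.
  //   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)  // Seq(1 -> List(1, 3), 0 -> List(2, 4))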
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
 *   (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
 *   (B)ranch: the agent holds, or is on an outwards path to, a read-only copy of the data.
 *   (N)one: the agent holds no permissions on the data.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open id range [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
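// Worked example (illustrative comment, not in the original source):
//   IdRange(4, 8) covers source ids 4, 5, 6, 7 (size = 4);
//   IdRange(4, 8).contains(7) == true, IdRange(4, 8).contains(8) == false;
//   IdRange(4, 8).shift(8) == IdRange(12, 16); IdRange(4, 8).overlaps(IdRange(6, 10)) == true.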
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
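// Worked example (illustrative comment, not in the original source):
//   TransferSizes(4, 64) allows power-of-two transfers of 4 to 64 bytes;
//   TransferSizes(4, 64).intersect(TransferSizes(16, 256)) == TransferSizes(16, 64);
//   TransferSizes(4, 64).mincover(TransferSizes(128, 256)) == TransferSizes(4, 256).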
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the address bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
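// Worked example (illustrative comment, not in the original source):
//   AddressSet(0x1000, 0xfff) is the contiguous range 0x1000-0x1fff (alignment = 0x1000);
//   AddressSet(0x1000, 0xfff).contains(BigInt(0x1a00)) == true;
//   AddressSet.misaligned(0x1000, 0x3000) == Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)).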
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
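  // Worked example (illustrative comment, not in the original source): with beatBytes = 8, a
  // 32-byte PutFullData occupies numBeats = 4 beats on A (numBeats1 = 3), while a 32-byte Get
  // is a single request beat (numBeats1 = 0) because only its D-channel response carries data.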
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
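  // Usage sketch (illustrative comment, not in the original source): track multibeat bursts on
  // the D channel of a TLBundle named `out`.
  //   val (d_first, d_last, d_done) = edge.firstlast(out.d)
  //   when (out.d.fire && d_first) { /* capture per-burst state on the first beat */ }
  //   when (d_done) { /* the final beat of the burst has been accepted */ }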
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
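  // Usage sketch (illustrative comment, not in the original source; signal names are made up):
  // the (legal, bits) pair is typically used to drive an A channel, gating valid on the static
  // legality check.
  //   val (legal, get) = edgeOut.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   out.a.valid := wantRead && legal
  //   out.a.bits  := get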
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
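  // C-channel responses: a client acknowledging a forwarded B-channel access
  // (AccessAck / AccessAckData) or hint (HintAck).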
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
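/** The edge as seen from the manager (sink) side: builds B-channel requests toward
  * clients and D-channel responses (Grant, ReleaseAck, AccessAck, HintAck).
  */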
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
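  // Grant (no data) and GrantData answer an Acquire; ReleaseAck answers a Release.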
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
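  // D-channel responses to A-channel accesses; 'denied' reports that the access failed.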
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
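
// -----------------------------------------------------------------------------
// Generated output: TLMonitor_97
// A TileLink protocol monitor elaborated from Monitor.scala. It passively
// observes the A and D channels of an uncached TileLink link (64-bit data,
// 28-bit address, 8-bit source IDs), tracking bursts and in-flight
// transactions for the simulation-only protocol assertions.
// -----------------------------------------------------------------------------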
module TLMonitor_97( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
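  // Observation only: every port is an input; the monitor drives nothing on the link.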
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54]
wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52]
wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79]
wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35]
wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35]
wire [975:0] c_opcodes_set = 976'h0; // @[Monitor.scala:740:34]
wire [975:0] c_sizes_set = 976'h0; // @[Monitor.scala:741:34]
wire [243:0] c_set = 244'h0; // @[Monitor.scala:738:34]
wire [243:0] c_set_wo_ready = 244'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 8'hF4; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
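  // Alignment check: the A-channel address must be naturally aligned to its transfer size.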
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
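  // Reconstruct the byte-lane mask implied by a_bits_address/a_bits_size, used by the
  // monitor's A-channel mask checks.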
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [7:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [7:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [7:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 8'hF4; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_672 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_672; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_672; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
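  // Burst tracking: a_first is high on the first beat of an A-channel message;
  // a_first_beats1 is (beats - 1), and is zero for opcodes that carry no data.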
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
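  // A-channel fields captured on the first beat; the monitor expects them to remain
  // stable for the rest of a burst.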
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [7:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_745 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_745; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
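  // D-channel fields captured on the first beat, for the corresponding response checks.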
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [7:0] source_1; // @[Monitor.scala:541:22]
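  // Per-source-ID bookkeeping: 'inflight' marks sources with an outstanding request,
  // while the opcode/size vectors keep a 4-bit lane per source recording what was issued.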
reg [243:0] inflight; // @[Monitor.scala:614:27]
reg [975:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [975:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [243:0] a_set; // @[Monitor.scala:626:34]
wire [243:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [975:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [975:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [975:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [975:0] _a_opcode_lookup_T_6 = {972'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [975:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[975:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [975:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [975:0] _a_size_lookup_T_6 = {972'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [975:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[975:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [255:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[243:0] : 244'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_672 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[243:0] : 244'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[975:0] : 976'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[975:0] : 976'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [243:0] d_clr; // @[Monitor.scala:664:34]
wire [243:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [975:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [975:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[243:0] : 244'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_745 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[243:0] : 244'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[975:0] : 976'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[975:0] : 976'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [243:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [243:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [243:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [975:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [975:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [975:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [975:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [975:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [975:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [243:0] inflight_1; // @[Monitor.scala:726:35]
wire [243:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [975:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [975:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [975:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [975:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [975:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [975:0] _c_opcode_lookup_T_6 = {972'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [975:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[975:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [975:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [975:0] _c_size_lookup_T_6 = {972'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [975:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[975:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [243:0] d_clr_1; // @[Monitor.scala:774:34]
wire [243:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [975:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [975:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_716 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_716 & d_release_ack_1 ? _d_clr_wo_ready_T_1[243:0] : 244'h0; // @[OneHot.scala:58:35]
wire _T_698 = _T_745 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_698 ? _d_clr_T_1[243:0] : 244'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_698 ? _d_opcodes_clr_T_11[975:0] : 976'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_698 ? _d_sizes_clr_T_11[975:0] : 976'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113]
wire [243:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [243:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [975:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [975:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [975:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [975:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
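// Note: lowMask(in, topBound, bottomBound) yields a |topBound - bottomBound|-bit
// thermometer-style mask whose set bits track where 'in' falls between the two
// bounds. RoundAnyRawFNToRecFN below applies it to the adjusted exponent to
// mark which low significand bits lie below the subnormal rounding point,
// roughly:
//   lowMask(sAdjustedExp(outExpWidth, 0),
//           outMinNormExp - outSigWidth - 1, outMinNormExp)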
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
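// Note: orReduceBy2 and orReduceBy4 OR-reduce adjacent groups of 2 or 4 bits
// into single bits (the final group may be narrower), shrinking a sticky-bit
// vector to half or a quarter of its width while preserving the "any bit set
// in this group" information.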
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
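// Note: for the single-precision case generated below (outExpWidth = 8,
// outSigWidth = 24) these recoded-exponent constants work out to
// outNaNExp = 448, outInfExp = 384, outMaxFiniteExp = 383,
// outMinNormExp = 130, and outMinNonzeroExp = 107.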
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
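// Note: when the input significand is wider than outSigWidth + 2, the bits
// that no longer fit are OR-reduced into a single sticky bit (the '.orR'
// term above), so the rounding logic still sees any precision lost by the
// narrowing.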
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
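// Note: on an increment under round-to-nearest-even, if the round bit is set
// with no sticky bits set (an exact tie), the '& ~Mux(...)' term clears the
// least-significant kept bit so the rounded significand is forced even.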
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
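// Note (roughly): the negated term above implements the after-rounding
// tininess option -- when io.detectTininess selects it, a result that
// rounds up to the minimum normal magnitude is not flagged as underflowing.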
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_70( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24]
wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14]
wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18]
wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18]
wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16]
wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16]
wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53]
wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38]
wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32]
wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_minMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:91:53]
wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53]
wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53]
wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53]
wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53]
wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27]
wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63]
wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42]
wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29]
wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42]
wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29]
wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60]
wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45]
wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42]
wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67]
wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61]
wire unboundedRange_roundPosBit = doShiftSigDown1 ? _unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}]
wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67]
wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27]
wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49]
wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = doShiftSigDown1 ? _common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34]
wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38]
wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45]
wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}]
wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60]
wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17]
wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18]
wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
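// Note: TLBuffer is spliced into the diplomatic graph with the := / :=*
// operators; for example, Manager.scala below inserts a default buffer stage
// on every channel between its crossbar and the tile's visibility node with
//   tlNode :=* TLBuffer() :=* tlXbar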
File Manager.scala:
package rerocc.manager
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.prci._
import freechips.rocketchip.subsystem._
import rerocc.bus._
case class ReRoCCManagerParams(
managerId: Int,
)
case object ReRoCCManagerControlAddress extends Field[BigInt](0x20000)
// For local PTW
class MiniDCache(reRoCCId: Int, crossing: ClockCrossingType)(implicit p: Parameters) extends DCache(0, crossing)(p) {
override def cacheClientParameters = Seq(TLMasterParameters.v1(
name = s"ReRoCC ${reRoCCId} DCache",
sourceId = IdRange(0, 1),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))
override def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"ReRoCC ${reRoCCId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
}
class ReRoCCManager(reRoCCTileParams: ReRoCCTileParams, roccOpcode: UInt)(implicit p: Parameters) extends LazyModule {
val node = ReRoCCManagerNode(ReRoCCManagerParams(reRoCCTileParams.reroccId))
val ibufEntries = p(ReRoCCIBufEntriesKey)
override lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val manager_id = Input(UInt(log2Ceil(p(ReRoCCTileKey).size).W))
val cmd = Decoupled(new RoCCCommand)
val resp = Flipped(Decoupled(new RoCCResponse))
val busy = Input(Bool())
val ptw = Flipped(new DatapathPTWIO)
})
val (rerocc, edge) = node.in(0)
val s_idle :: s_active :: s_rel_wait :: s_sfence :: s_unbusy :: Nil = Enum(5)
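// Note: summarizing the transitions coded below -- the manager idles in
// s_idle and an mAcquire moves it to s_active; an mRelease moves it to
// s_rel_wait, and once the accelerator is idle the release response steps
// through s_sfence back to s_idle; an mUnbusy parks it in s_unbusy until the
// unbusy-ack fires, returning to s_active.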
val numClients = edge.cParams.clients.map(_.nCfgs).sum
val client = Reg(UInt(log2Ceil(numClients).W))
val status = Reg(new MStatus)
val ptbr = Reg(new PTBR)
val state = RegInit(s_idle)
io.ptw.ptbr := ptbr
io.ptw.hgatp := 0.U.asTypeOf(new PTBR)
io.ptw.vsatp := 0.U.asTypeOf(new PTBR)
io.ptw.sfence.valid := state === s_sfence
io.ptw.sfence.bits.rs1 := false.B
io.ptw.sfence.bits.rs2 := false.B
io.ptw.sfence.bits.addr := 0.U
io.ptw.sfence.bits.asid := 0.U
io.ptw.sfence.bits.hv := false.B
io.ptw.sfence.bits.hg := false.B
io.ptw.status := status
io.ptw.hstatus := 0.U.asTypeOf(new HStatus)
io.ptw.gstatus := 0.U.asTypeOf(new MStatus)
io.ptw.pmp.foreach(_ := 0.U.asTypeOf(new PMP))
val rr_req = Queue(rerocc.req)
val (req_first, req_last, req_beat) = ReRoCCMsgFirstLast(rr_req, true)
val rr_resp = rerocc.resp
rr_req.ready := false.B
val inst_q = Module(new Queue(new RoCCCommand, ibufEntries))
val enq_inst = Reg(new RoCCCommand)
val next_enq_inst = WireInit(enq_inst)
inst_q.io.enq.valid := false.B
inst_q.io.enq.bits := next_enq_inst
inst_q.io.enq.bits.inst.opcode := roccOpcode
// 0 -> acquire ack
// 1 -> inst ack
// 2 -> writeback
// 3 -> rel
// 4 -> unbusyack
val resp_arb = Module(new ReRoCCMsgArbiter(edge.bundle, 5, false))
rr_resp <> resp_arb.io.out
resp_arb.io.in.foreach { i => i.valid := false.B }
val status_lower = Reg(UInt(64.W))
when (rr_req.valid) {
when (rr_req.bits.opcode === ReRoCCProtocol.mAcquire) {
rr_req.ready := resp_arb.io.in(0).ready
resp_arb.io.in(0).valid := true.B
when (state === s_idle && rr_req.fire) {
state := s_active
client := rr_req.bits.client_id
}
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mUStatus) {
rr_req.ready := !inst_q.io.deq.valid && !io.busy
when (!inst_q.io.deq.valid && !io.busy) {
when (req_first) { status_lower := rr_req.bits.data }
when (req_last) { status := Cat(rr_req.bits.data, status_lower).asTypeOf(new MStatus) }
}
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mUPtbr) {
rr_req.ready := !inst_q.io.deq.valid && !io.busy
when (!inst_q.io.deq.valid && !io.busy) { ptbr := rr_req.bits.data.asTypeOf(new PTBR) }
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mInst) {
assert(state === s_active && inst_q.io.enq.ready)
rr_req.ready := true.B
when (req_beat === 0.U) {
val inst = rr_req.bits.data.asTypeOf(new RoCCInstruction)
enq_inst.inst := inst
when (!inst.xs1 ) { enq_inst.rs1 := 0.U }
when (!inst.xs2 ) { enq_inst.rs2 := 0.U }
} .otherwise {
val enq_inst_rs1 = enq_inst.inst.xs1 && req_beat === 1.U
val enq_inst_rs2 = enq_inst.inst.xs2 && req_beat === Mux(enq_inst.inst.xs1, 2.U, 1.U)
when (enq_inst_rs1) { next_enq_inst.rs1 := rr_req.bits.data }
when (enq_inst_rs2) { next_enq_inst.rs2 := rr_req.bits.data }
enq_inst := next_enq_inst
}
when (req_last) {
inst_q.io.enq.valid := true.B
assert(inst_q.io.enq.ready)
}
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mRelease) {
rr_req.ready := true.B
state := s_rel_wait
} .elsewhen (rr_req.bits.opcode === ReRoCCProtocol.mUnbusy) {
rr_req.ready := true.B
state := s_unbusy
} .otherwise {
assert(false.B)
}
}
// acquire->ack/nack
resp_arb.io.in(0).bits.opcode := ReRoCCProtocol.sAcqResp
resp_arb.io.in(0).bits.client_id := rr_req.bits.client_id
resp_arb.io.in(0).bits.manager_id := io.manager_id
resp_arb.io.in(0).bits.data := state === s_idle
// insts -> (inst_q, inst_ack)
io.cmd.valid := inst_q.io.deq.valid && resp_arb.io.in(1).ready
io.cmd.bits := inst_q.io.deq.bits
inst_q.io.deq.ready := io.cmd.ready && resp_arb.io.in(1).ready
resp_arb.io.in(1).valid := inst_q.io.deq.valid && io.cmd.ready
resp_arb.io.in(1).bits.opcode := ReRoCCProtocol.sInstAck
resp_arb.io.in(1).bits.client_id := client
resp_arb.io.in(1).bits.manager_id := io.manager_id
resp_arb.io.in(1).bits.data := 0.U
// writebacks
val resp = Queue(io.resp)
val resp_rd = RegInit(false.B)
resp_arb.io.in(2).valid := resp.valid
resp_arb.io.in(2).bits.opcode := ReRoCCProtocol.sWrite
resp_arb.io.in(2).bits.client_id := client
resp_arb.io.in(2).bits.manager_id := io.manager_id
resp_arb.io.in(2).bits.data := Mux(resp_rd, resp.bits.rd, resp.bits.data)
when (resp_arb.io.in(2).fire) { resp_rd := !resp_rd }
resp.ready := resp_arb.io.in(2).ready && resp_rd
// release
resp_arb.io.in(3).valid := state === s_rel_wait && !io.busy && inst_q.io.count === 0.U
resp_arb.io.in(3).bits.opcode := ReRoCCProtocol.sRelResp
resp_arb.io.in(3).bits.client_id := client
resp_arb.io.in(3).bits.manager_id := io.manager_id
resp_arb.io.in(3).bits.data := 0.U
when (resp_arb.io.in(3).fire) {
state := s_sfence
}
when (state === s_sfence) { state := s_idle }
// unbusyack
resp_arb.io.in(4).valid := state === s_unbusy && !io.busy && inst_q.io.count === 0.U
resp_arb.io.in(4).bits.opcode := ReRoCCProtocol.sUnbusyAck
resp_arb.io.in(4).bits.client_id := client
resp_arb.io.in(4).bits.manager_id := io.manager_id
resp_arb.io.in(4).bits.data := 0.U
when (resp_arb.io.in(4).fire) { state := s_active }
}
}
class ReRoCCManagerTile()(implicit p: Parameters) extends LazyModule {
val reRoCCParams = p(TileKey).asInstanceOf[ReRoCCTileParams]
val reRoCCId = reRoCCParams.reroccId
def this(tileParams: ReRoCCTileParams, p: Parameters) = {
this()(p.alterMap(Map(
TileKey -> tileParams,
TileVisibilityNodeKey -> TLEphemeralNode()(ValName("rerocc_manager"))
)))
}
val reroccManagerIdSinkNode = BundleBridgeSink[UInt]()
val rocc = reRoCCParams.genRoCC.get(p)
require(rocc.opcodes.opcodes.size == 1)
val rerocc_manager = LazyModule(new ReRoCCManager(reRoCCParams, rocc.opcodes.opcodes.head))
val reRoCCNode = ReRoCCIdentityNode()
rerocc_manager.node := ReRoCCBuffer() := reRoCCNode
val tlNode = p(TileVisibilityNodeKey) // throttle before TL Node (merged ->
val tlXbar = TLXbar()
val stlNode = TLIdentityNode()
tlXbar :=* rocc.atlNode
if (reRoCCParams.mergeTLNodes) {
tlXbar :=* rocc.tlNode
} else {
tlNode :=* rocc.tlNode
}
tlNode :=* TLBuffer() :=* tlXbar
rocc.stlNode :*= stlNode
// minicache
val dcache = reRoCCParams.dcacheParams.map(_ => LazyModule(new MiniDCache(reRoCCId, SynchronousCrossing())(p)))
dcache.map(d => tlXbar := TLWidthWidget(reRoCCParams.rowBits/8) := d.node)
val hellammio: Option[HellaMMIO] = if (!dcache.isDefined) {
val h = LazyModule(new HellaMMIO(s"ReRoCC $reRoCCId MMIO"))
tlXbar := h.node
Some(h)
} else { None }
val ctrl = LazyModule(new ReRoCCManagerControl(reRoCCId, 8))
override lazy val module = new LazyModuleImp(this) {
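    // Memory-system plumbing (descriptive note): the PTW owns requestor port 0 of
    // the HellaCache arbiter, the RoCC mem port goes through a SimpleHellaCacheIF
    // on port 1, and the backing memory is either the optional MiniDCache or, when
    // no dcache is configured, the HellaMMIO adapter.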
val dcacheArb = Module(new HellaCacheArbiter(2)(p))
dcache.map(_.module.io.cpu).getOrElse(hellammio.get.module.io) <> dcacheArb.io.mem
val edge = dcache.map(_.node.edges.out(0)).getOrElse(hellammio.get.node.edges.out(0))
val ptw = Module(new PTW(1 + rocc.nPTWPorts)(edge, p))
if (dcache.isDefined) {
dcache.get.module.io.tlb_port := DontCare
dcache.get.module.io.tlb_port.req.valid := false.B
ptw.io.requestor(0) <> dcache.get.module.io.ptw
} else {
ptw.io.requestor(0) := DontCare
ptw.io.requestor(0).req.valid := false.B
}
dcacheArb.io.requestor(0) <> ptw.io.mem
val dcIF = Module(new SimpleHellaCacheIF)
dcIF.io.requestor <> rocc.module.io.mem
dcacheArb.io.requestor(1) <> dcIF.io.cache
for (i <- 0 until rocc.nPTWPorts) {
ptw.io.requestor(1+i) <> rocc.module.io.ptw(i)
}
rerocc_manager.module.io.manager_id := reroccManagerIdSinkNode.bundle
rocc.module.io.cmd <> rerocc_manager.module.io.cmd
rerocc_manager.module.io.resp <> rocc.module.io.resp
rerocc_manager.module.io.busy := rocc.module.io.busy
ptw.io.dpath <> rerocc_manager.module.io.ptw
rocc.module.io.fpu_req.ready := false.B
assert(!rocc.module.io.fpu_req.valid)
rocc.module.io.fpu_resp.valid := false.B
rocc.module.io.fpu_resp.bits := DontCare
rocc.module.io.exception := false.B
ctrl.module.io.mgr_busy := rerocc_manager.module.io.busy
ctrl.module.io.rocc_busy := rocc.module.io.busy
}
}
File Protocol.scala:
package rerocc.bus
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tile._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import rerocc.client.{ReRoCCClientParams}
import rerocc.manager.{ReRoCCManagerParams}
object ReRoCCProtocol {
val width = 3
val mAcquire = 0.U(width.W)
// beat0: data = inst
  // beat1: data = rs1 (if inst.xs1)
  // beat2: data = rs2 (if inst.xs2)
val mInst = 1.U(width.W)
// beat0: data = mstatus[63:0]
  // beat1: data = mstatus[127:64]
val mUStatus = 2.U(width.W)
// beat0: data = ptbr
val mUPtbr = 3.U(width.W)
val mRelease = 4.U(width.W)
val mUnbusy = 5.U(width.W)
// data
// data = acquired
val sAcqResp = 0.U(width.W)
// data = 0
val sInstAck = 1.U(width.W)
// beat0: data = data
// beat1: data = rd
val sWrite = 2.U(width.W)
val sRelResp = 3.U(width.W)
val sUnbusyAck = 4.U(width.W)
val MAX_BEATS = 3
}
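// A minimal sketch (not part of the original protocol code): the request beat
// counts implied by the comments above, expressed in plain Scala. The object name
// and software-level types are illustrative only; the hardware version of this
// bookkeeping lives in ReRoCCMsgFirstLast below.
object ReRoCCReqBeatCountSketch {
  def reqBeats(opcode: Int, xs1: Boolean, xs2: Boolean): Int = opcode match {
    case 1 /* mInst */    => 1 + (if (xs1) 1 else 0) + (if (xs2) 1 else 0)
    case 2 /* mUStatus */ => 2 // mstatus[63:0] then mstatus[127:64]
    case _                => 1 // acquire, ptbr, release, unbusy are single-beat
  }
}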
class ReRoCCMsgBundle(val params: ReRoCCBundleParams) extends Bundle {
val opcode = UInt(ReRoCCProtocol.width.W)
val client_id = UInt(params.clientIdBits.W)
val manager_id = UInt(params.managerIdBits.W)
val data = UInt(64.W)
}
object ReRoCCMsgFirstLast {
def apply(m: DecoupledIO[ReRoCCMsgBundle], isReq: Boolean): (Bool, Bool, UInt) = {
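    // Tracks the beat index of the current (possibly multi-beat) message. max_beat
    // is latched on the first beat from the opcode (and, for mInst, from the
    // xs1/xs2 fields of the instruction); both counters reset once the last beat
    // fires.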
val beat = RegInit(0.U(log2Ceil(ReRoCCProtocol.MAX_BEATS).W))
val max_beat = RegInit(0.U(log2Ceil(ReRoCCProtocol.MAX_BEATS).W))
val first = beat === 0.U
val last = Wire(Bool())
val inst = m.bits.data.asTypeOf(new RoCCInstruction)
when (m.fire && first) {
max_beat := 0.U
if (isReq) {
when (m.bits.opcode === ReRoCCProtocol.mInst) {
max_beat := inst.xs1 +& inst.xs2
} .elsewhen (m.bits.opcode === ReRoCCProtocol.mUStatus) {
max_beat := 1.U
}
} else {
when (m.bits.opcode === ReRoCCProtocol.sWrite) {
max_beat := 1.U
}
}
}
last := true.B
if (isReq) {
when (m.bits.opcode === ReRoCCProtocol.mUStatus) {
last := beat === max_beat && !first
} .elsewhen (m.bits.opcode === ReRoCCProtocol.mInst) {
last := Mux(first, !inst.xs1 && !inst.xs2, beat === max_beat)
}
} else {
when (m.bits.opcode === ReRoCCProtocol.sWrite) {
last := beat === max_beat && !first
}
}
when (m.fire) { beat := beat + 1.U }
when (m.fire && last) {
max_beat := 0.U
beat := 0.U
}
(first, last, beat)
}
}
class ReRoCCBundle(val params: ReRoCCBundleParams) extends Bundle {
val req = Decoupled(new ReRoCCMsgBundle(params))
val resp = Flipped(Decoupled(new ReRoCCMsgBundle(params)))
}
case class EmptyParams()
object ReRoCCImp extends SimpleNodeImp[ReRoCCClientPortParams, ReRoCCManagerPortParams, ReRoCCEdgeParams, ReRoCCBundle] {
def edge(pd: ReRoCCClientPortParams, pu: ReRoCCManagerPortParams, p: Parameters, sourceInfo: SourceInfo) = {
ReRoCCEdgeParams(pu, pd)
}
def bundle(e: ReRoCCEdgeParams) = new ReRoCCBundle(e.bundle)
def render(ei: ReRoCCEdgeParams) = RenderedEdge(colour = "#000000" /* black */)
}
case class ReRoCCClientNode(clientParams: ReRoCCClientParams)(implicit valName: ValName) extends SourceNode(ReRoCCImp)(Seq(ReRoCCClientPortParams(Seq(clientParams))))
case class ReRoCCManagerNode(managerParams: ReRoCCManagerParams)(implicit valName: ValName) extends SinkNode(ReRoCCImp)(Seq(ReRoCCManagerPortParams(Seq(managerParams))))
class ReRoCCBuffer(b: BufferParams = BufferParams.default)(implicit p: Parameters) extends LazyModule {
val node = new AdapterNode(ReRoCCImp)({s => s}, {s => s})
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, _), (out, _)) =>
out.req <> b(in.req)
in.resp <> b(out.resp)
}
}
}
object ReRoCCBuffer {
def apply(b: BufferParams = BufferParams.default)(implicit p: Parameters) = {
val rerocc_buffer = LazyModule(new ReRoCCBuffer(b)(p))
rerocc_buffer.node
}
}
case class ReRoCCIdentityNode()(implicit valName: ValName) extends IdentityNode(ReRoCCImp)()
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
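      // 'limit' is the number of narrow input beats merged into each wide output
      // beat, minus one (capped by the transfer size); e.g. a 4-byte transfer
      // through a 2B->8B widget gives limit = 1, i.e. two input beats per output.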
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
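      // 'index' selects which outBytes-wide slice of the wide beat is presented on
      // the current narrow output beat: the address-derived 'sel' picks the
      // starting slice and 'count' walks through the remaining ones.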
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
        case _ => require(false, "Impossible bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File RoCCFragments.scala:
package chipyard.config
import chisel3._
import org.chipsalliance.cde.config.{Field, Parameters, Config}
import freechips.rocketchip.tile._
import freechips.rocketchip.diplomacy._
import gemmini._
import chipyard.{TestSuitesKey, TestSuiteHelper}
/**
 * Map from a tileId to the sequence of RoCC accelerators attached to that tile
*/
case object MultiRoCCKey extends Field[Map[Int, Seq[Parameters => LazyRoCC]]](Map.empty[Int, Seq[Parameters => LazyRoCC]])
/**
* Config fragment to enable different RoCCs based on the tileId
*/
class WithMultiRoCC extends Config((site, here, up) => {
case BuildRoCC => site(MultiRoCCKey).getOrElse(site(TileKey).tileId, Nil)
})
/**
* Assigns what was previously in the BuildRoCC key to specific harts with MultiRoCCKey
* Must be paired with WithMultiRoCC
*/
class WithMultiRoCCFromBuildRoCC(harts: Int*) extends Config((site, here, up) => {
case BuildRoCC => Nil
case MultiRoCCKey => up(MultiRoCCKey, site) ++ harts.distinct.map { i =>
(i -> up(BuildRoCC, site))
}
})
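/**
 * Usage sketch (not part of the original file; the config name and base config are
 * illustrative): WithMultiRoCCFromBuildRoCC must sit below WithMultiRoCC and above
 * the fragment that populates BuildRoCC, so its up(BuildRoCC) lookup can see the
 * accelerator being reassigned to hart 0.
 */
class ExampleMultiRoCCAccumulatorConfig extends Config(
  new WithMultiRoCC ++
  new WithMultiRoCCFromBuildRoCC(0) ++
  new WithAccumulatorRoCC() ++
  new chipyard.RocketConfig)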
class WithMultiRoCCGemmini[T <: Data : Arithmetic, U <: Data, V <: Data](
harts: Int*)(gemminiConfig: GemminiArrayConfig[T,U,V] = GemminiConfigs.defaultConfig) extends Config((site, here, up) => {
case MultiRoCCKey => up(MultiRoCCKey, site) ++ harts.distinct.map { i =>
(i -> Seq((p: Parameters) => {
implicit val q = p
val gemmini = LazyModule(new Gemmini(gemminiConfig))
gemmini
}))
}
})
class WithAccumulatorRoCC(op: OpcodeSet = OpcodeSet.custom1) extends Config((site, here, up) => {
case BuildRoCC => up(BuildRoCC) ++ Seq((p: Parameters) => {
val accumulator = LazyModule(new AccumulatorExample(op, n = 4)(p))
accumulator
})
})
class WithCharacterCountRoCC(op: OpcodeSet = OpcodeSet.custom2) extends Config((site, here, up) => {
case BuildRoCC => up(BuildRoCC) ++ Seq((p: Parameters) => {
val counter = LazyModule(new CharacterCountExample(op)(p))
counter
})
})
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
 * `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 * - line `─`: the source is processed by a function and the result is passed onward
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
   * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
    ranges.sortBy(_._2).map(_._1) // Restore original order
}
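  // Worked example (illustration only): for endSourceIds Seq(3, 1) the sizes round
  // up to Seq(4, 1); the suffix sum places the larger block first, so the result
  // (restored to port order) is Seq(IdRange(0, 4), IdRange(4, 5)), and every range
  // starts at a multiple of its power-of-two size.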
def relabeler() = {
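    // Returns a factory of per-port fifoId remappers that share one global counter,
    // so distinct (port, local fifoId) domains receive distinct global ids while
    // repeated local ids within a port map to the same global id.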
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]) {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master need connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
// We need an intermediate size of bundle with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
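    // Illustrative example (comment only, not in the original source): with two reachable outputs at
    // AddressSet(0x0, 0xfff) and AddressSet(0x1000, 0xfff), AddressDecoder picks routingMask = 0x1000,
    // widen(~routingMask) turns every other address bit into a don't-care, and each port's decode
    // function collapses to a single test of address bit 12.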
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
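  // Behavioral sketch (comment only, not in the original source): for a one-hot 'select', exactly one
  // replica sees 'valid', and 'input.ready' mirrors that replica's 'ready' through the Mux1H above.
  // When 'force(i)' is set, the payload is routed through IdentityModule so that output receives its
  // own copy of the bits; the motivation (forcing a separate physical fanout branch) is an inference.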
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
module ReRoCCManagerTile_4( // @[Manager.scala:237:34]
input clock, // @[Manager.scala:237:34]
input reset, // @[Manager.scala:237:34]
output auto_ctrl_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_ctrl_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_ctrl_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_ctrl_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_ctrl_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_ctrl_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrl_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrl_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_ctrl_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_ctrl_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_buffer_out_b_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_buffer_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_buffer_out_b_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_b_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_buffer_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_buffer_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_buffer_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_re_ro_cc_in_req_ready, // @[LazyModuleImp.scala:107:25]
input auto_re_ro_cc_in_req_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_re_ro_cc_in_req_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_re_ro_cc_in_req_bits_client_id, // @[LazyModuleImp.scala:107:25]
input auto_re_ro_cc_in_req_bits_manager_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_re_ro_cc_in_req_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_re_ro_cc_in_resp_ready, // @[LazyModuleImp.scala:107:25]
output auto_re_ro_cc_in_resp_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_re_ro_cc_in_resp_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_re_ro_cc_in_resp_bits_client_id, // @[LazyModuleImp.scala:107:25]
output auto_re_ro_cc_in_resp_bits_manager_id, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_re_ro_cc_in_resp_bits_data, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_rerocc_manager_id_sink_in // @[LazyModuleImp.scala:107:25]
);
wire reRoCCNodeOut_resp_valid; // @[MixedNode.scala:542:17]
wire [63:0] reRoCCNodeOut_resp_bits_data; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_resp_bits_manager_id; // @[MixedNode.scala:542:17]
wire [3:0] reRoCCNodeOut_resp_bits_client_id; // @[MixedNode.scala:542:17]
wire [2:0] reRoCCNodeOut_resp_bits_opcode; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_req_ready; // @[MixedNode.scala:542:17]
wire widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_b_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire _dcIF_io_requestor_req_ready; // @[Manager.scala:255:22]
wire _dcIF_io_requestor_resp_valid; // @[Manager.scala:255:22]
wire [39:0] _dcIF_io_requestor_resp_bits_addr; // @[Manager.scala:255:22]
wire [7:0] _dcIF_io_requestor_resp_bits_tag; // @[Manager.scala:255:22]
wire [4:0] _dcIF_io_requestor_resp_bits_cmd; // @[Manager.scala:255:22]
wire [1:0] _dcIF_io_requestor_resp_bits_size; // @[Manager.scala:255:22]
wire _dcIF_io_requestor_resp_bits_signed; // @[Manager.scala:255:22]
wire [1:0] _dcIF_io_requestor_resp_bits_dprv; // @[Manager.scala:255:22]
wire _dcIF_io_requestor_resp_bits_dv; // @[Manager.scala:255:22]
wire [63:0] _dcIF_io_requestor_resp_bits_data; // @[Manager.scala:255:22]
wire [7:0] _dcIF_io_requestor_resp_bits_mask; // @[Manager.scala:255:22]
wire _dcIF_io_requestor_resp_bits_replay; // @[Manager.scala:255:22]
wire _dcIF_io_requestor_resp_bits_has_data; // @[Manager.scala:255:22]
wire [63:0] _dcIF_io_requestor_resp_bits_data_word_bypass; // @[Manager.scala:255:22]
wire [63:0] _dcIF_io_requestor_resp_bits_data_raw; // @[Manager.scala:255:22]
wire [63:0] _dcIF_io_requestor_resp_bits_store_data; // @[Manager.scala:255:22]
wire _dcIF_io_cache_req_valid; // @[Manager.scala:255:22]
wire [63:0] _dcIF_io_cache_s1_data_data; // @[Manager.scala:255:22]
wire [7:0] _dcIF_io_cache_s1_data_mask; // @[Manager.scala:255:22]
wire _ptw_io_requestor_0_req_ready; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_valid; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_ae_ptw; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_ae_final; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pf; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_gf; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_hr; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_hw; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_hx; // @[Manager.scala:243:21]
wire [9:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_future; // @[Manager.scala:243:21]
wire [43:0] _ptw_io_requestor_0_resp_bits_pte_ppn; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_software; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_d; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_a; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_g; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_u; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_x; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_w; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_r; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_pte_v; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_resp_bits_level; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_homogeneous; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_gpa_valid; // @[Manager.scala:243:21]
wire [38:0] _ptw_io_requestor_0_resp_bits_gpa_bits; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_resp_bits_gpa_is_pte; // @[Manager.scala:243:21]
wire [3:0] _ptw_io_requestor_0_ptbr_mode; // @[Manager.scala:243:21]
wire [15:0] _ptw_io_requestor_0_ptbr_asid; // @[Manager.scala:243:21]
wire [43:0] _ptw_io_requestor_0_ptbr_ppn; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_debug; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_cease; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_wfi; // @[Manager.scala:243:21]
wire [31:0] _ptw_io_requestor_0_status_isa; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_dprv; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_dv; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_prv; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_v; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sd; // @[Manager.scala:243:21]
wire [22:0] _ptw_io_requestor_0_status_zero2; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mpv; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_gva; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mbe; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sbe; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_sxl; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_uxl; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sd_rv32; // @[Manager.scala:243:21]
wire [7:0] _ptw_io_requestor_0_status_zero1; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_tsr; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_tw; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_tvm; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mxr; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sum; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mprv; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_xs; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_fs; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_mpp; // @[Manager.scala:243:21]
wire [1:0] _ptw_io_requestor_0_status_vs; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_spp; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mpie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_ube; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_spie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_upie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_mie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_hie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_sie; // @[Manager.scala:243:21]
wire _ptw_io_requestor_0_status_uie; // @[Manager.scala:243:21]
wire _ptw_io_mem_req_valid; // @[Manager.scala:243:21]
wire [39:0] _ptw_io_mem_req_bits_addr; // @[Manager.scala:243:21]
wire _ptw_io_mem_req_bits_dv; // @[Manager.scala:243:21]
wire _ptw_io_mem_s1_kill; // @[Manager.scala:243:21]
wire _ptw_io_dpath_perf_pte_miss; // @[Manager.scala:243:21]
wire _ptw_io_dpath_clock_enabled; // @[Manager.scala:243:21]
wire _dcacheArb_io_requestor_0_req_ready; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_nack; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_nack_cause_raw; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_uncached; // @[Manager.scala:238:27]
wire [31:0] _dcacheArb_io_requestor_0_s2_paddr; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_valid; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_0_resp_bits_addr; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_0_resp_bits_tag; // @[Manager.scala:238:27]
wire [4:0] _dcacheArb_io_requestor_0_resp_bits_cmd; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_0_resp_bits_size; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_signed; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_0_resp_bits_dprv; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_dv; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_0_resp_bits_mask; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_replay; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_resp_bits_has_data; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_word_bypass; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_raw; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_store_data; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_replay_next; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ma_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ma_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_pf_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_pf_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ae_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_s2_xcpt_ae_st; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_0_s2_gpa; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_ordered; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_store_pending; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_acquire; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_release; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_grant; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_tlbMiss; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_blocked; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_req_ready; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_nack; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_nack_cause_raw; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_uncached; // @[Manager.scala:238:27]
wire [31:0] _dcacheArb_io_requestor_1_s2_paddr; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_valid; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_1_resp_bits_addr; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_1_resp_bits_tag; // @[Manager.scala:238:27]
wire [4:0] _dcacheArb_io_requestor_1_resp_bits_cmd; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_1_resp_bits_size; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_signed; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_requestor_1_resp_bits_dprv; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_dv; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_requestor_1_resp_bits_mask; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_replay; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_resp_bits_has_data; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_word_bypass; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_raw; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_store_data; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_replay_next; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ma_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ma_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_pf_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_pf_st; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ae_ld; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_s2_xcpt_ae_st; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_requestor_1_s2_gpa; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_ordered; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_store_pending; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_acquire; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_release; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_grant; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_tlbMiss; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_blocked; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad; // @[Manager.scala:238:27]
wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_req_valid; // @[Manager.scala:238:27]
wire [39:0] _dcacheArb_io_mem_req_bits_addr; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_mem_req_bits_tag; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_mem_req_bits_size; // @[Manager.scala:238:27]
wire [1:0] _dcacheArb_io_mem_req_bits_dprv; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_req_bits_dv; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_req_bits_phys; // @[Manager.scala:238:27]
wire _dcacheArb_io_mem_s1_kill; // @[Manager.scala:238:27]
wire [63:0] _dcacheArb_io_mem_s1_data_data; // @[Manager.scala:238:27]
wire [7:0] _dcacheArb_io_mem_s1_data_mask; // @[Manager.scala:238:27]
wire _dcache_io_cpu_req_ready; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_nack; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_nack_cause_raw; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_uncached; // @[Manager.scala:226:61]
wire [31:0] _dcache_io_cpu_s2_paddr; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_valid; // @[Manager.scala:226:61]
wire [39:0] _dcache_io_cpu_resp_bits_addr; // @[Manager.scala:226:61]
wire [7:0] _dcache_io_cpu_resp_bits_tag; // @[Manager.scala:226:61]
wire [4:0] _dcache_io_cpu_resp_bits_cmd; // @[Manager.scala:226:61]
wire [1:0] _dcache_io_cpu_resp_bits_size; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_signed; // @[Manager.scala:226:61]
wire [1:0] _dcache_io_cpu_resp_bits_dprv; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_dv; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_data; // @[Manager.scala:226:61]
wire [7:0] _dcache_io_cpu_resp_bits_mask; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_replay; // @[Manager.scala:226:61]
wire _dcache_io_cpu_resp_bits_has_data; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_data_word_bypass; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_data_raw; // @[Manager.scala:226:61]
wire [63:0] _dcache_io_cpu_resp_bits_store_data; // @[Manager.scala:226:61]
wire _dcache_io_cpu_replay_next; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ma_ld; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ma_st; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_pf_ld; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_pf_st; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ae_ld; // @[Manager.scala:226:61]
wire _dcache_io_cpu_s2_xcpt_ae_st; // @[Manager.scala:226:61]
wire [39:0] _dcache_io_cpu_s2_gpa; // @[Manager.scala:226:61]
wire _dcache_io_cpu_ordered; // @[Manager.scala:226:61]
wire _dcache_io_cpu_store_pending; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_acquire; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_release; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_grant; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_tlbMiss; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_blocked; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_canAcceptStoreThenLoad; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_canAcceptStoreThenRMW; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_canAcceptLoadThenLoad; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_storeBufferEmptyAfterLoad; // @[Manager.scala:226:61]
wire _dcache_io_cpu_perf_storeBufferEmptyAfterStore; // @[Manager.scala:226:61]
wire _dcache_io_ptw_req_valid; // @[Manager.scala:226:61]
wire [26:0] _dcache_io_ptw_req_bits_bits_addr; // @[Manager.scala:226:61]
wire _dcache_io_ptw_req_bits_bits_need_gpa; // @[Manager.scala:226:61]
wire _buffer_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_b_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_b_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_b_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_b_bits_size; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_b_bits_source; // @[Buffer.scala:75:28]
wire [31:0] _buffer_auto_in_b_bits_address; // @[Buffer.scala:75:28]
wire [7:0] _buffer_auto_in_b_bits_mask; // @[Buffer.scala:75:28]
wire [63:0] _buffer_auto_in_b_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_in_b_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_auto_in_c_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [63:0] _buffer_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_auto_in_e_ready; // @[Buffer.scala:75:28]
wire _xbar_auto_anon_in_0_a_ready; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_d_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_in_0_d_bits_opcode; // @[Xbar.scala:346:26]
wire [1:0] _xbar_auto_anon_in_0_d_bits_param; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_0_d_bits_size; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_in_0_d_bits_sink; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_d_bits_denied; // @[Xbar.scala:346:26]
wire [63:0] _xbar_auto_anon_in_0_d_bits_data; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_d_bits_corrupt; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_a_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_a_bits_opcode; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_a_bits_param; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_out_a_bits_size; // @[Xbar.scala:346:26]
wire [1:0] _xbar_auto_anon_out_a_bits_source; // @[Xbar.scala:346:26]
wire [31:0] _xbar_auto_anon_out_a_bits_address; // @[Xbar.scala:346:26]
wire [7:0] _xbar_auto_anon_out_a_bits_mask; // @[Xbar.scala:346:26]
wire [63:0] _xbar_auto_anon_out_a_bits_data; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_b_ready; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_c_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_c_bits_opcode; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_c_bits_param; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_out_c_bits_size; // @[Xbar.scala:346:26]
wire [1:0] _xbar_auto_anon_out_c_bits_source; // @[Xbar.scala:346:26]
wire [31:0] _xbar_auto_anon_out_c_bits_address; // @[Xbar.scala:346:26]
wire [63:0] _xbar_auto_anon_out_c_bits_data; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_d_ready; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_e_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_e_bits_sink; // @[Xbar.scala:346:26]
wire _rerocc_buffer_auto_out_req_valid; // @[Protocol.scala:134:35]
wire [2:0] _rerocc_buffer_auto_out_req_bits_opcode; // @[Protocol.scala:134:35]
wire [3:0] _rerocc_buffer_auto_out_req_bits_client_id; // @[Protocol.scala:134:35]
wire _rerocc_buffer_auto_out_req_bits_manager_id; // @[Protocol.scala:134:35]
wire [63:0] _rerocc_buffer_auto_out_req_bits_data; // @[Protocol.scala:134:35]
wire _rerocc_buffer_auto_out_resp_ready; // @[Protocol.scala:134:35]
wire _rerocc_manager_auto_in_req_ready; // @[Manager.scala:209:34]
wire _rerocc_manager_auto_in_resp_valid; // @[Manager.scala:209:34]
wire [2:0] _rerocc_manager_auto_in_resp_bits_opcode; // @[Manager.scala:209:34]
wire [3:0] _rerocc_manager_auto_in_resp_bits_client_id; // @[Manager.scala:209:34]
wire _rerocc_manager_auto_in_resp_bits_manager_id; // @[Manager.scala:209:34]
wire [63:0] _rerocc_manager_auto_in_resp_bits_data; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_valid; // @[Manager.scala:209:34]
wire [6:0] _rerocc_manager_io_cmd_bits_inst_funct; // @[Manager.scala:209:34]
wire [4:0] _rerocc_manager_io_cmd_bits_inst_rs2; // @[Manager.scala:209:34]
wire [4:0] _rerocc_manager_io_cmd_bits_inst_rs1; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_inst_xd; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_inst_xs1; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_inst_xs2; // @[Manager.scala:209:34]
wire [4:0] _rerocc_manager_io_cmd_bits_inst_rd; // @[Manager.scala:209:34]
wire [6:0] _rerocc_manager_io_cmd_bits_inst_opcode; // @[Manager.scala:209:34]
wire [63:0] _rerocc_manager_io_cmd_bits_rs1; // @[Manager.scala:209:34]
wire [63:0] _rerocc_manager_io_cmd_bits_rs2; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_debug; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_cease; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_wfi; // @[Manager.scala:209:34]
wire [31:0] _rerocc_manager_io_cmd_bits_status_isa; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_dprv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_dv; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_prv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_v; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_sd; // @[Manager.scala:209:34]
wire [22:0] _rerocc_manager_io_cmd_bits_status_zero2; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_mpv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_gva; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_mbe; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_sbe; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_sxl; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_uxl; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_sd_rv32; // @[Manager.scala:209:34]
wire [7:0] _rerocc_manager_io_cmd_bits_status_zero1; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_tsr; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_tw; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_tvm; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_mxr; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_sum; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_mprv; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_xs; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_fs; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_mpp; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_cmd_bits_status_vs; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_spp; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_mpie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_ube; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_spie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_upie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_mie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_hie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_sie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_cmd_bits_status_uie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_resp_ready; // @[Manager.scala:209:34]
wire [3:0] _rerocc_manager_io_ptw_ptbr_mode; // @[Manager.scala:209:34]
wire [15:0] _rerocc_manager_io_ptw_ptbr_asid; // @[Manager.scala:209:34]
wire [43:0] _rerocc_manager_io_ptw_ptbr_ppn; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_sfence_valid; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_debug; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_cease; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_wfi; // @[Manager.scala:209:34]
wire [31:0] _rerocc_manager_io_ptw_status_isa; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_dprv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_dv; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_prv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_v; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sd; // @[Manager.scala:209:34]
wire [22:0] _rerocc_manager_io_ptw_status_zero2; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mpv; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_gva; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mbe; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sbe; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_sxl; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_uxl; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sd_rv32; // @[Manager.scala:209:34]
wire [7:0] _rerocc_manager_io_ptw_status_zero1; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_tsr; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_tw; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_tvm; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mxr; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sum; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mprv; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_xs; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_fs; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_mpp; // @[Manager.scala:209:34]
wire [1:0] _rerocc_manager_io_ptw_status_vs; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_spp; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mpie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_ube; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_spie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_upie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_mie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_hie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_sie; // @[Manager.scala:209:34]
wire _rerocc_manager_io_ptw_status_uie; // @[Manager.scala:209:34]
wire _counter_auto_atl_out_a_valid; // @[RoCCFragments.scala:56:29]
wire [31:0] _counter_auto_atl_out_a_bits_address; // @[RoCCFragments.scala:56:29]
wire _counter_auto_atl_out_d_ready; // @[RoCCFragments.scala:56:29]
wire _counter_io_cmd_ready; // @[RoCCFragments.scala:56:29]
wire _counter_io_resp_valid; // @[RoCCFragments.scala:56:29]
wire [4:0] _counter_io_resp_bits_rd; // @[RoCCFragments.scala:56:29]
wire [63:0] _counter_io_resp_bits_data; // @[RoCCFragments.scala:56:29]
wire _counter_io_busy; // @[RoCCFragments.scala:56:29]
wire auto_ctrl_ctrl_in_a_valid_0 = auto_ctrl_ctrl_in_a_valid; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_a_bits_opcode_0 = auto_ctrl_ctrl_in_a_bits_opcode; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_a_bits_param_0 = auto_ctrl_ctrl_in_a_bits_param; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_a_bits_size_0 = auto_ctrl_ctrl_in_a_bits_size; // @[Manager.scala:237:34]
wire [6:0] auto_ctrl_ctrl_in_a_bits_source_0 = auto_ctrl_ctrl_in_a_bits_source; // @[Manager.scala:237:34]
wire [11:0] auto_ctrl_ctrl_in_a_bits_address_0 = auto_ctrl_ctrl_in_a_bits_address; // @[Manager.scala:237:34]
wire [7:0] auto_ctrl_ctrl_in_a_bits_mask_0 = auto_ctrl_ctrl_in_a_bits_mask; // @[Manager.scala:237:34]
wire [63:0] auto_ctrl_ctrl_in_a_bits_data_0 = auto_ctrl_ctrl_in_a_bits_data; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_a_bits_corrupt_0 = auto_ctrl_ctrl_in_a_bits_corrupt; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_ready_0 = auto_ctrl_ctrl_in_d_ready; // @[Manager.scala:237:34]
wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[Manager.scala:237:34]
wire auto_buffer_out_b_valid_0 = auto_buffer_out_b_valid; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_b_bits_opcode_0 = auto_buffer_out_b_bits_opcode; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_b_bits_param_0 = auto_buffer_out_b_bits_param; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_b_bits_size_0 = auto_buffer_out_b_bits_size; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_b_bits_source_0 = auto_buffer_out_b_bits_source; // @[Manager.scala:237:34]
wire [31:0] auto_buffer_out_b_bits_address_0 = auto_buffer_out_b_bits_address; // @[Manager.scala:237:34]
wire [7:0] auto_buffer_out_b_bits_mask_0 = auto_buffer_out_b_bits_mask; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_b_bits_data_0 = auto_buffer_out_b_bits_data; // @[Manager.scala:237:34]
wire auto_buffer_out_b_bits_corrupt_0 = auto_buffer_out_b_bits_corrupt; // @[Manager.scala:237:34]
wire auto_buffer_out_c_ready_0 = auto_buffer_out_c_ready; // @[Manager.scala:237:34]
wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[Manager.scala:237:34]
wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[Manager.scala:237:34]
wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[Manager.scala:237:34]
wire auto_buffer_out_e_ready_0 = auto_buffer_out_e_ready; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_req_valid_0 = auto_re_ro_cc_in_req_valid; // @[Manager.scala:237:34]
wire [2:0] auto_re_ro_cc_in_req_bits_opcode_0 = auto_re_ro_cc_in_req_bits_opcode; // @[Manager.scala:237:34]
wire [3:0] auto_re_ro_cc_in_req_bits_client_id_0 = auto_re_ro_cc_in_req_bits_client_id; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_req_bits_manager_id_0 = auto_re_ro_cc_in_req_bits_manager_id; // @[Manager.scala:237:34]
wire [63:0] auto_re_ro_cc_in_req_bits_data_0 = auto_re_ro_cc_in_req_bits_data; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_resp_ready_0 = auto_re_ro_cc_in_resp_ready; // @[Manager.scala:237:34]
wire [6:0] auto_rerocc_manager_id_sink_in_0 = auto_rerocc_manager_id_sink_in; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_bits_sink = 1'h0; // @[Xbar.scala:346:26]
wire auto_ctrl_ctrl_in_d_bits_denied = 1'h0; // @[Xbar.scala:346:26]
wire auto_ctrl_ctrl_in_d_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_auto_anon_in_a_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_auto_anon_in_c_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_auto_anon_out_a_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_auto_anon_out_c_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_anonOut_a_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_anonOut_c_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_anonIn_a_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire widget_anonIn_c_bits_corrupt = 1'h0; // @[Xbar.scala:346:26]
wire [1:0] auto_ctrl_ctrl_in_d_bits_param = 2'h0; // @[Manager.scala:209:34, :226:61, :235:24, :237:34, :243:21, :255:22]
wire reRoCCNodeIn_req_ready; // @[MixedNode.scala:551:17]
wire reRoCCNodeIn_req_valid = auto_re_ro_cc_in_req_valid_0; // @[Manager.scala:237:34]
wire [2:0] reRoCCNodeIn_req_bits_opcode = auto_re_ro_cc_in_req_bits_opcode_0; // @[Manager.scala:237:34]
wire [3:0] reRoCCNodeIn_req_bits_client_id = auto_re_ro_cc_in_req_bits_client_id_0; // @[Manager.scala:237:34]
wire reRoCCNodeIn_req_bits_manager_id = auto_re_ro_cc_in_req_bits_manager_id_0; // @[Manager.scala:237:34]
wire [63:0] reRoCCNodeIn_req_bits_data = auto_re_ro_cc_in_req_bits_data_0; // @[Manager.scala:237:34]
wire reRoCCNodeIn_resp_ready = auto_re_ro_cc_in_resp_ready_0; // @[Manager.scala:237:34]
wire reRoCCNodeIn_resp_valid; // @[MixedNode.scala:551:17]
wire [2:0] reRoCCNodeIn_resp_bits_opcode; // @[MixedNode.scala:551:17]
wire [3:0] reRoCCNodeIn_resp_bits_client_id; // @[MixedNode.scala:551:17]
wire reRoCCNodeIn_resp_bits_manager_id; // @[MixedNode.scala:551:17]
wire [63:0] reRoCCNodeIn_resp_bits_data; // @[MixedNode.scala:551:17]
wire [6:0] reroccManagerIdSinkNodeIn = auto_rerocc_manager_id_sink_in_0; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_a_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_d_bits_opcode_0; // @[Manager.scala:237:34]
wire [2:0] auto_ctrl_ctrl_in_d_bits_size_0; // @[Manager.scala:237:34]
wire [6:0] auto_ctrl_ctrl_in_d_bits_source_0; // @[Manager.scala:237:34]
wire [63:0] auto_ctrl_ctrl_in_d_bits_data_0; // @[Manager.scala:237:34]
wire auto_ctrl_ctrl_in_d_valid_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_a_bits_param_0; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_a_bits_size_0; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_a_bits_source_0; // @[Manager.scala:237:34]
wire [31:0] auto_buffer_out_a_bits_address_0; // @[Manager.scala:237:34]
wire [7:0] auto_buffer_out_a_bits_mask_0; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_a_bits_data_0; // @[Manager.scala:237:34]
wire auto_buffer_out_a_bits_corrupt_0; // @[Manager.scala:237:34]
wire auto_buffer_out_a_valid_0; // @[Manager.scala:237:34]
wire auto_buffer_out_b_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_c_bits_opcode_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_c_bits_param_0; // @[Manager.scala:237:34]
wire [3:0] auto_buffer_out_c_bits_size_0; // @[Manager.scala:237:34]
wire [1:0] auto_buffer_out_c_bits_source_0; // @[Manager.scala:237:34]
wire [31:0] auto_buffer_out_c_bits_address_0; // @[Manager.scala:237:34]
wire [63:0] auto_buffer_out_c_bits_data_0; // @[Manager.scala:237:34]
wire auto_buffer_out_c_bits_corrupt_0; // @[Manager.scala:237:34]
wire auto_buffer_out_c_valid_0; // @[Manager.scala:237:34]
wire auto_buffer_out_d_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_buffer_out_e_bits_sink_0; // @[Manager.scala:237:34]
wire auto_buffer_out_e_valid_0; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_req_ready_0; // @[Manager.scala:237:34]
wire [2:0] auto_re_ro_cc_in_resp_bits_opcode_0; // @[Manager.scala:237:34]
wire [3:0] auto_re_ro_cc_in_resp_bits_client_id_0; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_resp_bits_manager_id_0; // @[Manager.scala:237:34]
wire [63:0] auto_re_ro_cc_in_resp_bits_data_0; // @[Manager.scala:237:34]
wire auto_re_ro_cc_in_resp_valid_0; // @[Manager.scala:237:34]
wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_b_ready = widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_b_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_b_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_b_bits_size; // @[MixedNode.scala:551:17]
wire widget_anonIn_b_bits_source; // @[MixedNode.scala:551:17]
wire [31:0] widget_anonIn_b_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] widget_anonIn_b_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] widget_anonIn_b_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_b_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonIn_c_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_c_valid = widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_c_bits_opcode = widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_c_bits_param = widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_c_bits_size = widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonIn_c_bits_source = widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_c_bits_address = widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonIn_c_bits_data = widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonIn_e_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_e_valid = widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_e_bits_sink = widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire widget_anonOut_b_ready; // @[MixedNode.scala:542:17]
wire widget_anonOut_b_valid = widget_auto_anon_out_b_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_b_bits_opcode = widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_anonOut_b_bits_param = widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_b_bits_size = widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonOut_b_bits_source = widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonOut_b_bits_address = widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_anonOut_b_bits_mask = widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonOut_b_bits_data = widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_b_bits_corrupt = widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_anonOut_c_ready = widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] widget_anonOut_c_bits_size; // @[MixedNode.scala:542:17]
wire widget_anonOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] widget_anonOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] widget_anonOut_c_bits_data; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_anonOut_e_ready = widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_b_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_b_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_b_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_b_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_in_b_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_b_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_b_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_c_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_e_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_b_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_c_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_c_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_c_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_c_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_c_bits_address; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_c_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_c_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_e_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_e_valid; // @[WidthWidget.scala:27:9]
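  // The TLWidthWidget 'widget' elaborates to a pass-through here: both sides carry 64-bit data,
  // so all five TileLink channels (A-E) are forwarded directly between anonIn and anonOut.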
assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_b_ready = widget_anonOut_b_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_b_valid = widget_anonOut_b_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_opcode = widget_anonOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_param = widget_anonOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_size = widget_anonOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_source = widget_anonOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_address = widget_anonOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_mask = widget_anonOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_data = widget_anonOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_b_bits_corrupt = widget_anonOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_c_ready = widget_anonOut_c_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_c_valid = widget_anonOut_c_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_opcode = widget_anonOut_c_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_param = widget_anonOut_c_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_size = widget_anonOut_c_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_source = widget_anonOut_c_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_address = widget_anonOut_c_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_c_bits_data = widget_anonOut_c_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_e_ready = widget_anonOut_e_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_e_valid = widget_anonOut_e_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_e_bits_sink = widget_anonOut_e_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_b_ready = widget_anonIn_b_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_b_valid = widget_anonIn_b_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_opcode = widget_anonIn_b_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_param = widget_anonIn_b_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_size = widget_anonIn_b_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_source = widget_anonIn_b_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_address = widget_anonIn_b_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_mask = widget_anonIn_b_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_data = widget_anonIn_b_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_b_bits_corrupt = widget_anonIn_b_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_c_ready = widget_anonIn_c_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_c_valid = widget_anonIn_c_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_opcode = widget_anonIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_param = widget_anonIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_size = widget_anonIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_source = widget_anonIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_address = widget_anonIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_c_bits_data = widget_anonIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_e_ready = widget_anonIn_e_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_e_valid = widget_anonIn_e_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_e_bits_sink = widget_anonIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17]
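  // ReRoCC node wiring: forward the request channel from the external re_ro_cc_in port into the
  // design and return the response channel produced by the rerocc_buffer instance below.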
assign reRoCCNodeIn_req_ready = reRoCCNodeOut_req_ready; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_valid = reRoCCNodeOut_resp_valid; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_bits_opcode = reRoCCNodeOut_resp_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_bits_client_id = reRoCCNodeOut_resp_bits_client_id; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeIn_resp_bits_manager_id = reRoCCNodeOut_resp_bits_manager_id; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] reRoCCNodeOut_req_bits_opcode; // @[MixedNode.scala:542:17]
wire [3:0] reRoCCNodeOut_req_bits_client_id; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_req_bits_manager_id; // @[MixedNode.scala:542:17]
wire [63:0] reRoCCNodeOut_req_bits_data; // @[MixedNode.scala:542:17]
assign reRoCCNodeIn_resp_bits_data = reRoCCNodeOut_resp_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire reRoCCNodeOut_req_valid; // @[MixedNode.scala:542:17]
wire reRoCCNodeOut_resp_ready; // @[MixedNode.scala:542:17]
assign auto_re_ro_cc_in_req_ready_0 = reRoCCNodeIn_req_ready; // @[Manager.scala:237:34]
assign reRoCCNodeOut_req_valid = reRoCCNodeIn_req_valid; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_opcode = reRoCCNodeIn_req_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_client_id = reRoCCNodeIn_req_bits_client_id; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_manager_id = reRoCCNodeIn_req_bits_manager_id; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_req_bits_data = reRoCCNodeIn_req_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign reRoCCNodeOut_resp_ready = reRoCCNodeIn_resp_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_re_ro_cc_in_resp_valid_0 = reRoCCNodeIn_resp_valid; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_opcode_0 = reRoCCNodeIn_resp_bits_opcode; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_client_id_0 = reRoCCNodeIn_resp_bits_client_id; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_manager_id_0 = reRoCCNodeIn_resp_bits_manager_id; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_data_0 = reRoCCNodeIn_resp_bits_data; // @[Manager.scala:237:34]
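  // RoCC accelerator: the CharacterCountExample unit receives commands from the ReRoCC manager,
  // accesses memory through the dcIF/dcache path, and issues TileLink requests on its ATL port
  // into the local crossbar (anon_in_0).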
CharacterCountExample counter ( // @[RoCCFragments.scala:56:29]
.clock (clock),
.reset (reset),
.auto_atl_out_a_ready (_xbar_auto_anon_in_0_a_ready), // @[Xbar.scala:346:26]
.auto_atl_out_a_valid (_counter_auto_atl_out_a_valid),
.auto_atl_out_a_bits_address (_counter_auto_atl_out_a_bits_address),
.auto_atl_out_d_ready (_counter_auto_atl_out_d_ready),
.auto_atl_out_d_valid (_xbar_auto_anon_in_0_d_valid), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_opcode (_xbar_auto_anon_in_0_d_bits_opcode), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_param (_xbar_auto_anon_in_0_d_bits_param), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_size (_xbar_auto_anon_in_0_d_bits_size), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_sink (_xbar_auto_anon_in_0_d_bits_sink), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_denied (_xbar_auto_anon_in_0_d_bits_denied), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_data (_xbar_auto_anon_in_0_d_bits_data), // @[Xbar.scala:346:26]
.auto_atl_out_d_bits_corrupt (_xbar_auto_anon_in_0_d_bits_corrupt), // @[Xbar.scala:346:26]
.io_cmd_ready (_counter_io_cmd_ready),
.io_cmd_valid (_rerocc_manager_io_cmd_valid), // @[Manager.scala:209:34]
.io_cmd_bits_inst_funct (_rerocc_manager_io_cmd_bits_inst_funct), // @[Manager.scala:209:34]
.io_cmd_bits_inst_rs2 (_rerocc_manager_io_cmd_bits_inst_rs2), // @[Manager.scala:209:34]
.io_cmd_bits_inst_rs1 (_rerocc_manager_io_cmd_bits_inst_rs1), // @[Manager.scala:209:34]
.io_cmd_bits_inst_xd (_rerocc_manager_io_cmd_bits_inst_xd), // @[Manager.scala:209:34]
.io_cmd_bits_inst_xs1 (_rerocc_manager_io_cmd_bits_inst_xs1), // @[Manager.scala:209:34]
.io_cmd_bits_inst_xs2 (_rerocc_manager_io_cmd_bits_inst_xs2), // @[Manager.scala:209:34]
.io_cmd_bits_inst_rd (_rerocc_manager_io_cmd_bits_inst_rd), // @[Manager.scala:209:34]
.io_cmd_bits_inst_opcode (_rerocc_manager_io_cmd_bits_inst_opcode), // @[Manager.scala:209:34]
.io_cmd_bits_rs1 (_rerocc_manager_io_cmd_bits_rs1), // @[Manager.scala:209:34]
.io_cmd_bits_rs2 (_rerocc_manager_io_cmd_bits_rs2), // @[Manager.scala:209:34]
.io_cmd_bits_status_debug (_rerocc_manager_io_cmd_bits_status_debug), // @[Manager.scala:209:34]
.io_cmd_bits_status_cease (_rerocc_manager_io_cmd_bits_status_cease), // @[Manager.scala:209:34]
.io_cmd_bits_status_wfi (_rerocc_manager_io_cmd_bits_status_wfi), // @[Manager.scala:209:34]
.io_cmd_bits_status_isa (_rerocc_manager_io_cmd_bits_status_isa), // @[Manager.scala:209:34]
.io_cmd_bits_status_dprv (_rerocc_manager_io_cmd_bits_status_dprv), // @[Manager.scala:209:34]
.io_cmd_bits_status_dv (_rerocc_manager_io_cmd_bits_status_dv), // @[Manager.scala:209:34]
.io_cmd_bits_status_prv (_rerocc_manager_io_cmd_bits_status_prv), // @[Manager.scala:209:34]
.io_cmd_bits_status_v (_rerocc_manager_io_cmd_bits_status_v), // @[Manager.scala:209:34]
.io_cmd_bits_status_sd (_rerocc_manager_io_cmd_bits_status_sd), // @[Manager.scala:209:34]
.io_cmd_bits_status_zero2 (_rerocc_manager_io_cmd_bits_status_zero2), // @[Manager.scala:209:34]
.io_cmd_bits_status_mpv (_rerocc_manager_io_cmd_bits_status_mpv), // @[Manager.scala:209:34]
.io_cmd_bits_status_gva (_rerocc_manager_io_cmd_bits_status_gva), // @[Manager.scala:209:34]
.io_cmd_bits_status_mbe (_rerocc_manager_io_cmd_bits_status_mbe), // @[Manager.scala:209:34]
.io_cmd_bits_status_sbe (_rerocc_manager_io_cmd_bits_status_sbe), // @[Manager.scala:209:34]
.io_cmd_bits_status_sxl (_rerocc_manager_io_cmd_bits_status_sxl), // @[Manager.scala:209:34]
.io_cmd_bits_status_uxl (_rerocc_manager_io_cmd_bits_status_uxl), // @[Manager.scala:209:34]
.io_cmd_bits_status_sd_rv32 (_rerocc_manager_io_cmd_bits_status_sd_rv32), // @[Manager.scala:209:34]
.io_cmd_bits_status_zero1 (_rerocc_manager_io_cmd_bits_status_zero1), // @[Manager.scala:209:34]
.io_cmd_bits_status_tsr (_rerocc_manager_io_cmd_bits_status_tsr), // @[Manager.scala:209:34]
.io_cmd_bits_status_tw (_rerocc_manager_io_cmd_bits_status_tw), // @[Manager.scala:209:34]
.io_cmd_bits_status_tvm (_rerocc_manager_io_cmd_bits_status_tvm), // @[Manager.scala:209:34]
.io_cmd_bits_status_mxr (_rerocc_manager_io_cmd_bits_status_mxr), // @[Manager.scala:209:34]
.io_cmd_bits_status_sum (_rerocc_manager_io_cmd_bits_status_sum), // @[Manager.scala:209:34]
.io_cmd_bits_status_mprv (_rerocc_manager_io_cmd_bits_status_mprv), // @[Manager.scala:209:34]
.io_cmd_bits_status_xs (_rerocc_manager_io_cmd_bits_status_xs), // @[Manager.scala:209:34]
.io_cmd_bits_status_fs (_rerocc_manager_io_cmd_bits_status_fs), // @[Manager.scala:209:34]
.io_cmd_bits_status_mpp (_rerocc_manager_io_cmd_bits_status_mpp), // @[Manager.scala:209:34]
.io_cmd_bits_status_vs (_rerocc_manager_io_cmd_bits_status_vs), // @[Manager.scala:209:34]
.io_cmd_bits_status_spp (_rerocc_manager_io_cmd_bits_status_spp), // @[Manager.scala:209:34]
.io_cmd_bits_status_mpie (_rerocc_manager_io_cmd_bits_status_mpie), // @[Manager.scala:209:34]
.io_cmd_bits_status_ube (_rerocc_manager_io_cmd_bits_status_ube), // @[Manager.scala:209:34]
.io_cmd_bits_status_spie (_rerocc_manager_io_cmd_bits_status_spie), // @[Manager.scala:209:34]
.io_cmd_bits_status_upie (_rerocc_manager_io_cmd_bits_status_upie), // @[Manager.scala:209:34]
.io_cmd_bits_status_mie (_rerocc_manager_io_cmd_bits_status_mie), // @[Manager.scala:209:34]
.io_cmd_bits_status_hie (_rerocc_manager_io_cmd_bits_status_hie), // @[Manager.scala:209:34]
.io_cmd_bits_status_sie (_rerocc_manager_io_cmd_bits_status_sie), // @[Manager.scala:209:34]
.io_cmd_bits_status_uie (_rerocc_manager_io_cmd_bits_status_uie), // @[Manager.scala:209:34]
.io_resp_ready (_rerocc_manager_io_resp_ready), // @[Manager.scala:209:34]
.io_resp_valid (_counter_io_resp_valid),
.io_resp_bits_rd (_counter_io_resp_bits_rd),
.io_resp_bits_data (_counter_io_resp_bits_data),
.io_mem_req_ready (_dcIF_io_requestor_req_ready), // @[Manager.scala:255:22]
.io_mem_resp_valid (_dcIF_io_requestor_resp_valid), // @[Manager.scala:255:22]
.io_mem_resp_bits_addr (_dcIF_io_requestor_resp_bits_addr), // @[Manager.scala:255:22]
.io_mem_resp_bits_tag (_dcIF_io_requestor_resp_bits_tag), // @[Manager.scala:255:22]
.io_mem_resp_bits_cmd (_dcIF_io_requestor_resp_bits_cmd), // @[Manager.scala:255:22]
.io_mem_resp_bits_size (_dcIF_io_requestor_resp_bits_size), // @[Manager.scala:255:22]
.io_mem_resp_bits_signed (_dcIF_io_requestor_resp_bits_signed), // @[Manager.scala:255:22]
.io_mem_resp_bits_dprv (_dcIF_io_requestor_resp_bits_dprv), // @[Manager.scala:255:22]
.io_mem_resp_bits_dv (_dcIF_io_requestor_resp_bits_dv), // @[Manager.scala:255:22]
.io_mem_resp_bits_data (_dcIF_io_requestor_resp_bits_data), // @[Manager.scala:255:22]
.io_mem_resp_bits_mask (_dcIF_io_requestor_resp_bits_mask), // @[Manager.scala:255:22]
.io_mem_resp_bits_replay (_dcIF_io_requestor_resp_bits_replay), // @[Manager.scala:255:22]
.io_mem_resp_bits_has_data (_dcIF_io_requestor_resp_bits_has_data), // @[Manager.scala:255:22]
.io_mem_resp_bits_data_word_bypass (_dcIF_io_requestor_resp_bits_data_word_bypass), // @[Manager.scala:255:22]
.io_mem_resp_bits_data_raw (_dcIF_io_requestor_resp_bits_data_raw), // @[Manager.scala:255:22]
.io_mem_resp_bits_store_data (_dcIF_io_requestor_resp_bits_store_data), // @[Manager.scala:255:22]
.io_busy (_counter_io_busy)
); // @[RoCCFragments.scala:56:29]
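  // ReRoCC manager: converts ReRoCC protocol requests from the buffer into RoCC commands for the
  // accelerator, forwards its responses, and supplies ptbr/sfence/status to the PTW datapath.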
ReRoCCManager_4 rerocc_manager ( // @[Manager.scala:209:34]
.clock (clock),
.reset (reset),
.auto_in_req_ready (_rerocc_manager_auto_in_req_ready),
.auto_in_req_valid (_rerocc_buffer_auto_out_req_valid), // @[Protocol.scala:134:35]
.auto_in_req_bits_opcode (_rerocc_buffer_auto_out_req_bits_opcode), // @[Protocol.scala:134:35]
.auto_in_req_bits_client_id (_rerocc_buffer_auto_out_req_bits_client_id), // @[Protocol.scala:134:35]
.auto_in_req_bits_manager_id (_rerocc_buffer_auto_out_req_bits_manager_id), // @[Protocol.scala:134:35]
.auto_in_req_bits_data (_rerocc_buffer_auto_out_req_bits_data), // @[Protocol.scala:134:35]
.auto_in_resp_ready (_rerocc_buffer_auto_out_resp_ready), // @[Protocol.scala:134:35]
.auto_in_resp_valid (_rerocc_manager_auto_in_resp_valid),
.auto_in_resp_bits_opcode (_rerocc_manager_auto_in_resp_bits_opcode),
.auto_in_resp_bits_client_id (_rerocc_manager_auto_in_resp_bits_client_id),
.auto_in_resp_bits_manager_id (_rerocc_manager_auto_in_resp_bits_manager_id),
.auto_in_resp_bits_data (_rerocc_manager_auto_in_resp_bits_data),
.io_manager_id (reroccManagerIdSinkNodeIn[2:0]), // @[Manager.scala:262:41]
.io_cmd_ready (_counter_io_cmd_ready), // @[RoCCFragments.scala:56:29]
.io_cmd_valid (_rerocc_manager_io_cmd_valid),
.io_cmd_bits_inst_funct (_rerocc_manager_io_cmd_bits_inst_funct),
.io_cmd_bits_inst_rs2 (_rerocc_manager_io_cmd_bits_inst_rs2),
.io_cmd_bits_inst_rs1 (_rerocc_manager_io_cmd_bits_inst_rs1),
.io_cmd_bits_inst_xd (_rerocc_manager_io_cmd_bits_inst_xd),
.io_cmd_bits_inst_xs1 (_rerocc_manager_io_cmd_bits_inst_xs1),
.io_cmd_bits_inst_xs2 (_rerocc_manager_io_cmd_bits_inst_xs2),
.io_cmd_bits_inst_rd (_rerocc_manager_io_cmd_bits_inst_rd),
.io_cmd_bits_inst_opcode (_rerocc_manager_io_cmd_bits_inst_opcode),
.io_cmd_bits_rs1 (_rerocc_manager_io_cmd_bits_rs1),
.io_cmd_bits_rs2 (_rerocc_manager_io_cmd_bits_rs2),
.io_cmd_bits_status_debug (_rerocc_manager_io_cmd_bits_status_debug),
.io_cmd_bits_status_cease (_rerocc_manager_io_cmd_bits_status_cease),
.io_cmd_bits_status_wfi (_rerocc_manager_io_cmd_bits_status_wfi),
.io_cmd_bits_status_isa (_rerocc_manager_io_cmd_bits_status_isa),
.io_cmd_bits_status_dprv (_rerocc_manager_io_cmd_bits_status_dprv),
.io_cmd_bits_status_dv (_rerocc_manager_io_cmd_bits_status_dv),
.io_cmd_bits_status_prv (_rerocc_manager_io_cmd_bits_status_prv),
.io_cmd_bits_status_v (_rerocc_manager_io_cmd_bits_status_v),
.io_cmd_bits_status_sd (_rerocc_manager_io_cmd_bits_status_sd),
.io_cmd_bits_status_zero2 (_rerocc_manager_io_cmd_bits_status_zero2),
.io_cmd_bits_status_mpv (_rerocc_manager_io_cmd_bits_status_mpv),
.io_cmd_bits_status_gva (_rerocc_manager_io_cmd_bits_status_gva),
.io_cmd_bits_status_mbe (_rerocc_manager_io_cmd_bits_status_mbe),
.io_cmd_bits_status_sbe (_rerocc_manager_io_cmd_bits_status_sbe),
.io_cmd_bits_status_sxl (_rerocc_manager_io_cmd_bits_status_sxl),
.io_cmd_bits_status_uxl (_rerocc_manager_io_cmd_bits_status_uxl),
.io_cmd_bits_status_sd_rv32 (_rerocc_manager_io_cmd_bits_status_sd_rv32),
.io_cmd_bits_status_zero1 (_rerocc_manager_io_cmd_bits_status_zero1),
.io_cmd_bits_status_tsr (_rerocc_manager_io_cmd_bits_status_tsr),
.io_cmd_bits_status_tw (_rerocc_manager_io_cmd_bits_status_tw),
.io_cmd_bits_status_tvm (_rerocc_manager_io_cmd_bits_status_tvm),
.io_cmd_bits_status_mxr (_rerocc_manager_io_cmd_bits_status_mxr),
.io_cmd_bits_status_sum (_rerocc_manager_io_cmd_bits_status_sum),
.io_cmd_bits_status_mprv (_rerocc_manager_io_cmd_bits_status_mprv),
.io_cmd_bits_status_xs (_rerocc_manager_io_cmd_bits_status_xs),
.io_cmd_bits_status_fs (_rerocc_manager_io_cmd_bits_status_fs),
.io_cmd_bits_status_mpp (_rerocc_manager_io_cmd_bits_status_mpp),
.io_cmd_bits_status_vs (_rerocc_manager_io_cmd_bits_status_vs),
.io_cmd_bits_status_spp (_rerocc_manager_io_cmd_bits_status_spp),
.io_cmd_bits_status_mpie (_rerocc_manager_io_cmd_bits_status_mpie),
.io_cmd_bits_status_ube (_rerocc_manager_io_cmd_bits_status_ube),
.io_cmd_bits_status_spie (_rerocc_manager_io_cmd_bits_status_spie),
.io_cmd_bits_status_upie (_rerocc_manager_io_cmd_bits_status_upie),
.io_cmd_bits_status_mie (_rerocc_manager_io_cmd_bits_status_mie),
.io_cmd_bits_status_hie (_rerocc_manager_io_cmd_bits_status_hie),
.io_cmd_bits_status_sie (_rerocc_manager_io_cmd_bits_status_sie),
.io_cmd_bits_status_uie (_rerocc_manager_io_cmd_bits_status_uie),
.io_resp_ready (_rerocc_manager_io_resp_ready),
.io_resp_valid (_counter_io_resp_valid), // @[RoCCFragments.scala:56:29]
.io_resp_bits_rd (_counter_io_resp_bits_rd), // @[RoCCFragments.scala:56:29]
.io_resp_bits_data (_counter_io_resp_bits_data), // @[RoCCFragments.scala:56:29]
.io_busy (_counter_io_busy), // @[RoCCFragments.scala:56:29]
.io_ptw_ptbr_mode (_rerocc_manager_io_ptw_ptbr_mode),
.io_ptw_ptbr_asid (_rerocc_manager_io_ptw_ptbr_asid),
.io_ptw_ptbr_ppn (_rerocc_manager_io_ptw_ptbr_ppn),
.io_ptw_sfence_valid (_rerocc_manager_io_ptw_sfence_valid),
.io_ptw_status_debug (_rerocc_manager_io_ptw_status_debug),
.io_ptw_status_cease (_rerocc_manager_io_ptw_status_cease),
.io_ptw_status_wfi (_rerocc_manager_io_ptw_status_wfi),
.io_ptw_status_isa (_rerocc_manager_io_ptw_status_isa),
.io_ptw_status_dprv (_rerocc_manager_io_ptw_status_dprv),
.io_ptw_status_dv (_rerocc_manager_io_ptw_status_dv),
.io_ptw_status_prv (_rerocc_manager_io_ptw_status_prv),
.io_ptw_status_v (_rerocc_manager_io_ptw_status_v),
.io_ptw_status_sd (_rerocc_manager_io_ptw_status_sd),
.io_ptw_status_zero2 (_rerocc_manager_io_ptw_status_zero2),
.io_ptw_status_mpv (_rerocc_manager_io_ptw_status_mpv),
.io_ptw_status_gva (_rerocc_manager_io_ptw_status_gva),
.io_ptw_status_mbe (_rerocc_manager_io_ptw_status_mbe),
.io_ptw_status_sbe (_rerocc_manager_io_ptw_status_sbe),
.io_ptw_status_sxl (_rerocc_manager_io_ptw_status_sxl),
.io_ptw_status_uxl (_rerocc_manager_io_ptw_status_uxl),
.io_ptw_status_sd_rv32 (_rerocc_manager_io_ptw_status_sd_rv32),
.io_ptw_status_zero1 (_rerocc_manager_io_ptw_status_zero1),
.io_ptw_status_tsr (_rerocc_manager_io_ptw_status_tsr),
.io_ptw_status_tw (_rerocc_manager_io_ptw_status_tw),
.io_ptw_status_tvm (_rerocc_manager_io_ptw_status_tvm),
.io_ptw_status_mxr (_rerocc_manager_io_ptw_status_mxr),
.io_ptw_status_sum (_rerocc_manager_io_ptw_status_sum),
.io_ptw_status_mprv (_rerocc_manager_io_ptw_status_mprv),
.io_ptw_status_xs (_rerocc_manager_io_ptw_status_xs),
.io_ptw_status_fs (_rerocc_manager_io_ptw_status_fs),
.io_ptw_status_mpp (_rerocc_manager_io_ptw_status_mpp),
.io_ptw_status_vs (_rerocc_manager_io_ptw_status_vs),
.io_ptw_status_spp (_rerocc_manager_io_ptw_status_spp),
.io_ptw_status_mpie (_rerocc_manager_io_ptw_status_mpie),
.io_ptw_status_ube (_rerocc_manager_io_ptw_status_ube),
.io_ptw_status_spie (_rerocc_manager_io_ptw_status_spie),
.io_ptw_status_upie (_rerocc_manager_io_ptw_status_upie),
.io_ptw_status_mie (_rerocc_manager_io_ptw_status_mie),
.io_ptw_status_hie (_rerocc_manager_io_ptw_status_hie),
.io_ptw_status_sie (_rerocc_manager_io_ptw_status_sie),
.io_ptw_status_uie (_rerocc_manager_io_ptw_status_uie),
.io_ptw_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), // @[Manager.scala:243:21]
.io_ptw_clock_enabled (_ptw_io_dpath_clock_enabled) // @[Manager.scala:243:21]
); // @[Manager.scala:209:34]
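  // Buffer stage on the ReRoCC link between the diplomatic node and the manager.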
ReRoCCBuffer_5 rerocc_buffer ( // @[Protocol.scala:134:35]
.clock (clock),
.reset (reset),
.auto_in_req_ready (reRoCCNodeOut_req_ready),
.auto_in_req_valid (reRoCCNodeOut_req_valid), // @[MixedNode.scala:542:17]
.auto_in_req_bits_opcode (reRoCCNodeOut_req_bits_opcode), // @[MixedNode.scala:542:17]
.auto_in_req_bits_client_id (reRoCCNodeOut_req_bits_client_id), // @[MixedNode.scala:542:17]
.auto_in_req_bits_manager_id (reRoCCNodeOut_req_bits_manager_id), // @[MixedNode.scala:542:17]
.auto_in_req_bits_data (reRoCCNodeOut_req_bits_data), // @[MixedNode.scala:542:17]
.auto_in_resp_ready (reRoCCNodeOut_resp_ready), // @[MixedNode.scala:542:17]
.auto_in_resp_valid (reRoCCNodeOut_resp_valid),
.auto_in_resp_bits_opcode (reRoCCNodeOut_resp_bits_opcode),
.auto_in_resp_bits_client_id (reRoCCNodeOut_resp_bits_client_id),
.auto_in_resp_bits_manager_id (reRoCCNodeOut_resp_bits_manager_id),
.auto_in_resp_bits_data (reRoCCNodeOut_resp_bits_data),
.auto_out_req_ready (_rerocc_manager_auto_in_req_ready), // @[Manager.scala:209:34]
.auto_out_req_valid (_rerocc_buffer_auto_out_req_valid),
.auto_out_req_bits_opcode (_rerocc_buffer_auto_out_req_bits_opcode),
.auto_out_req_bits_client_id (_rerocc_buffer_auto_out_req_bits_client_id),
.auto_out_req_bits_manager_id (_rerocc_buffer_auto_out_req_bits_manager_id),
.auto_out_req_bits_data (_rerocc_buffer_auto_out_req_bits_data),
.auto_out_resp_ready (_rerocc_buffer_auto_out_resp_ready),
.auto_out_resp_valid (_rerocc_manager_auto_in_resp_valid), // @[Manager.scala:209:34]
.auto_out_resp_bits_opcode (_rerocc_manager_auto_in_resp_bits_opcode), // @[Manager.scala:209:34]
.auto_out_resp_bits_client_id (_rerocc_manager_auto_in_resp_bits_client_id), // @[Manager.scala:209:34]
.auto_out_resp_bits_manager_id (_rerocc_manager_auto_in_resp_bits_manager_id), // @[Manager.scala:209:34]
.auto_out_resp_bits_data (_rerocc_manager_auto_in_resp_bits_data) // @[Manager.scala:209:34]
); // @[Protocol.scala:134:35]
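  // TileLink crossbar: port 0 takes the accelerator's ATL requests, port 1 takes the dcache
  // traffic arriving through the width widget; the merged stream feeds the outgoing buffer.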
TLXbar_i2_o1_a32d64s2k3z4c xbar ( // @[Xbar.scala:346:26]
.clock (clock),
.reset (reset),
.auto_anon_in_1_a_ready (widget_auto_anon_out_a_ready),
.auto_anon_in_1_a_valid (widget_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_opcode (widget_auto_anon_out_a_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_param (widget_auto_anon_out_a_bits_param), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_size (widget_auto_anon_out_a_bits_size), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_source (widget_auto_anon_out_a_bits_source), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_address (widget_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_mask (widget_auto_anon_out_a_bits_mask), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_data (widget_auto_anon_out_a_bits_data), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_b_ready (widget_auto_anon_out_b_ready), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_b_valid (widget_auto_anon_out_b_valid),
.auto_anon_in_1_b_bits_opcode (widget_auto_anon_out_b_bits_opcode),
.auto_anon_in_1_b_bits_param (widget_auto_anon_out_b_bits_param),
.auto_anon_in_1_b_bits_size (widget_auto_anon_out_b_bits_size),
.auto_anon_in_1_b_bits_source (widget_auto_anon_out_b_bits_source),
.auto_anon_in_1_b_bits_address (widget_auto_anon_out_b_bits_address),
.auto_anon_in_1_b_bits_mask (widget_auto_anon_out_b_bits_mask),
.auto_anon_in_1_b_bits_data (widget_auto_anon_out_b_bits_data),
.auto_anon_in_1_b_bits_corrupt (widget_auto_anon_out_b_bits_corrupt),
.auto_anon_in_1_c_ready (widget_auto_anon_out_c_ready),
.auto_anon_in_1_c_valid (widget_auto_anon_out_c_valid), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_c_bits_opcode (widget_auto_anon_out_c_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_c_bits_param (widget_auto_anon_out_c_bits_param), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_c_bits_size (widget_auto_anon_out_c_bits_size), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_c_bits_source (widget_auto_anon_out_c_bits_source), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_c_bits_address (widget_auto_anon_out_c_bits_address), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_c_bits_data (widget_auto_anon_out_c_bits_data), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_d_ready (widget_auto_anon_out_d_ready), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_d_valid (widget_auto_anon_out_d_valid),
.auto_anon_in_1_d_bits_opcode (widget_auto_anon_out_d_bits_opcode),
.auto_anon_in_1_d_bits_param (widget_auto_anon_out_d_bits_param),
.auto_anon_in_1_d_bits_size (widget_auto_anon_out_d_bits_size),
.auto_anon_in_1_d_bits_source (widget_auto_anon_out_d_bits_source),
.auto_anon_in_1_d_bits_sink (widget_auto_anon_out_d_bits_sink),
.auto_anon_in_1_d_bits_denied (widget_auto_anon_out_d_bits_denied),
.auto_anon_in_1_d_bits_data (widget_auto_anon_out_d_bits_data),
.auto_anon_in_1_d_bits_corrupt (widget_auto_anon_out_d_bits_corrupt),
.auto_anon_in_1_e_ready (widget_auto_anon_out_e_ready),
.auto_anon_in_1_e_valid (widget_auto_anon_out_e_valid), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_e_bits_sink (widget_auto_anon_out_e_bits_sink), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_ready (_xbar_auto_anon_in_0_a_ready),
.auto_anon_in_0_a_valid (_counter_auto_atl_out_a_valid), // @[RoCCFragments.scala:56:29]
.auto_anon_in_0_a_bits_address (_counter_auto_atl_out_a_bits_address), // @[RoCCFragments.scala:56:29]
.auto_anon_in_0_d_ready (_counter_auto_atl_out_d_ready), // @[RoCCFragments.scala:56:29]
.auto_anon_in_0_d_valid (_xbar_auto_anon_in_0_d_valid),
.auto_anon_in_0_d_bits_opcode (_xbar_auto_anon_in_0_d_bits_opcode),
.auto_anon_in_0_d_bits_param (_xbar_auto_anon_in_0_d_bits_param),
.auto_anon_in_0_d_bits_size (_xbar_auto_anon_in_0_d_bits_size),
.auto_anon_in_0_d_bits_sink (_xbar_auto_anon_in_0_d_bits_sink),
.auto_anon_in_0_d_bits_denied (_xbar_auto_anon_in_0_d_bits_denied),
.auto_anon_in_0_d_bits_data (_xbar_auto_anon_in_0_d_bits_data),
.auto_anon_in_0_d_bits_corrupt (_xbar_auto_anon_in_0_d_bits_corrupt),
.auto_anon_out_a_ready (_buffer_auto_in_a_ready), // @[Buffer.scala:75:28]
.auto_anon_out_a_valid (_xbar_auto_anon_out_a_valid),
.auto_anon_out_a_bits_opcode (_xbar_auto_anon_out_a_bits_opcode),
.auto_anon_out_a_bits_param (_xbar_auto_anon_out_a_bits_param),
.auto_anon_out_a_bits_size (_xbar_auto_anon_out_a_bits_size),
.auto_anon_out_a_bits_source (_xbar_auto_anon_out_a_bits_source),
.auto_anon_out_a_bits_address (_xbar_auto_anon_out_a_bits_address),
.auto_anon_out_a_bits_mask (_xbar_auto_anon_out_a_bits_mask),
.auto_anon_out_a_bits_data (_xbar_auto_anon_out_a_bits_data),
.auto_anon_out_b_ready (_xbar_auto_anon_out_b_ready),
.auto_anon_out_b_valid (_buffer_auto_in_b_valid), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_opcode (_buffer_auto_in_b_bits_opcode), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_param (_buffer_auto_in_b_bits_param), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_size (_buffer_auto_in_b_bits_size), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_source (_buffer_auto_in_b_bits_source), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_address (_buffer_auto_in_b_bits_address), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_mask (_buffer_auto_in_b_bits_mask), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_data (_buffer_auto_in_b_bits_data), // @[Buffer.scala:75:28]
.auto_anon_out_b_bits_corrupt (_buffer_auto_in_b_bits_corrupt), // @[Buffer.scala:75:28]
.auto_anon_out_c_ready (_buffer_auto_in_c_ready), // @[Buffer.scala:75:28]
.auto_anon_out_c_valid (_xbar_auto_anon_out_c_valid),
.auto_anon_out_c_bits_opcode (_xbar_auto_anon_out_c_bits_opcode),
.auto_anon_out_c_bits_param (_xbar_auto_anon_out_c_bits_param),
.auto_anon_out_c_bits_size (_xbar_auto_anon_out_c_bits_size),
.auto_anon_out_c_bits_source (_xbar_auto_anon_out_c_bits_source),
.auto_anon_out_c_bits_address (_xbar_auto_anon_out_c_bits_address),
.auto_anon_out_c_bits_data (_xbar_auto_anon_out_c_bits_data),
.auto_anon_out_d_ready (_xbar_auto_anon_out_d_ready),
.auto_anon_out_d_valid (_buffer_auto_in_d_valid), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_opcode (_buffer_auto_in_d_bits_opcode), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_param (_buffer_auto_in_d_bits_param), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_size (_buffer_auto_in_d_bits_size), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_source (_buffer_auto_in_d_bits_source), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_sink (_buffer_auto_in_d_bits_sink), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_denied (_buffer_auto_in_d_bits_denied), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_data (_buffer_auto_in_d_bits_data), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_corrupt (_buffer_auto_in_d_bits_corrupt), // @[Buffer.scala:75:28]
.auto_anon_out_e_ready (_buffer_auto_in_e_ready), // @[Buffer.scala:75:28]
.auto_anon_out_e_valid (_xbar_auto_anon_out_e_valid),
.auto_anon_out_e_bits_sink (_xbar_auto_anon_out_e_bits_sink)
); // @[Xbar.scala:346:26]
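  // Output TileLink buffer between the crossbar and this module's buffer_out port.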
TLBuffer_a32d64s2k3z4c_3 buffer ( // @[Buffer.scala:75:28]
.clock (clock),
.reset (reset),
.auto_in_a_ready (_buffer_auto_in_a_ready),
.auto_in_a_valid (_xbar_auto_anon_out_a_valid), // @[Xbar.scala:346:26]
.auto_in_a_bits_opcode (_xbar_auto_anon_out_a_bits_opcode), // @[Xbar.scala:346:26]
.auto_in_a_bits_param (_xbar_auto_anon_out_a_bits_param), // @[Xbar.scala:346:26]
.auto_in_a_bits_size (_xbar_auto_anon_out_a_bits_size), // @[Xbar.scala:346:26]
.auto_in_a_bits_source (_xbar_auto_anon_out_a_bits_source), // @[Xbar.scala:346:26]
.auto_in_a_bits_address (_xbar_auto_anon_out_a_bits_address), // @[Xbar.scala:346:26]
.auto_in_a_bits_mask (_xbar_auto_anon_out_a_bits_mask), // @[Xbar.scala:346:26]
.auto_in_a_bits_data (_xbar_auto_anon_out_a_bits_data), // @[Xbar.scala:346:26]
.auto_in_b_ready (_xbar_auto_anon_out_b_ready), // @[Xbar.scala:346:26]
.auto_in_b_valid (_buffer_auto_in_b_valid),
.auto_in_b_bits_opcode (_buffer_auto_in_b_bits_opcode),
.auto_in_b_bits_param (_buffer_auto_in_b_bits_param),
.auto_in_b_bits_size (_buffer_auto_in_b_bits_size),
.auto_in_b_bits_source (_buffer_auto_in_b_bits_source),
.auto_in_b_bits_address (_buffer_auto_in_b_bits_address),
.auto_in_b_bits_mask (_buffer_auto_in_b_bits_mask),
.auto_in_b_bits_data (_buffer_auto_in_b_bits_data),
.auto_in_b_bits_corrupt (_buffer_auto_in_b_bits_corrupt),
.auto_in_c_ready (_buffer_auto_in_c_ready),
.auto_in_c_valid (_xbar_auto_anon_out_c_valid), // @[Xbar.scala:346:26]
.auto_in_c_bits_opcode (_xbar_auto_anon_out_c_bits_opcode), // @[Xbar.scala:346:26]
.auto_in_c_bits_param (_xbar_auto_anon_out_c_bits_param), // @[Xbar.scala:346:26]
.auto_in_c_bits_size (_xbar_auto_anon_out_c_bits_size), // @[Xbar.scala:346:26]
.auto_in_c_bits_source (_xbar_auto_anon_out_c_bits_source), // @[Xbar.scala:346:26]
.auto_in_c_bits_address (_xbar_auto_anon_out_c_bits_address), // @[Xbar.scala:346:26]
.auto_in_c_bits_data (_xbar_auto_anon_out_c_bits_data), // @[Xbar.scala:346:26]
.auto_in_d_ready (_xbar_auto_anon_out_d_ready), // @[Xbar.scala:346:26]
.auto_in_d_valid (_buffer_auto_in_d_valid),
.auto_in_d_bits_opcode (_buffer_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_buffer_auto_in_d_bits_param),
.auto_in_d_bits_size (_buffer_auto_in_d_bits_size),
.auto_in_d_bits_source (_buffer_auto_in_d_bits_source),
.auto_in_d_bits_sink (_buffer_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_buffer_auto_in_d_bits_denied),
.auto_in_d_bits_data (_buffer_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_buffer_auto_in_d_bits_corrupt),
.auto_in_e_ready (_buffer_auto_in_e_ready),
.auto_in_e_valid (_xbar_auto_anon_out_e_valid), // @[Xbar.scala:346:26]
.auto_in_e_bits_sink (_xbar_auto_anon_out_e_bits_sink), // @[Xbar.scala:346:26]
.auto_out_a_ready (auto_buffer_out_a_ready_0), // @[Manager.scala:237:34]
.auto_out_a_valid (auto_buffer_out_a_valid_0),
.auto_out_a_bits_opcode (auto_buffer_out_a_bits_opcode_0),
.auto_out_a_bits_param (auto_buffer_out_a_bits_param_0),
.auto_out_a_bits_size (auto_buffer_out_a_bits_size_0),
.auto_out_a_bits_source (auto_buffer_out_a_bits_source_0),
.auto_out_a_bits_address (auto_buffer_out_a_bits_address_0),
.auto_out_a_bits_mask (auto_buffer_out_a_bits_mask_0),
.auto_out_a_bits_data (auto_buffer_out_a_bits_data_0),
.auto_out_a_bits_corrupt (auto_buffer_out_a_bits_corrupt_0),
.auto_out_b_ready (auto_buffer_out_b_ready_0),
.auto_out_b_valid (auto_buffer_out_b_valid_0), // @[Manager.scala:237:34]
.auto_out_b_bits_opcode (auto_buffer_out_b_bits_opcode_0), // @[Manager.scala:237:34]
.auto_out_b_bits_param (auto_buffer_out_b_bits_param_0), // @[Manager.scala:237:34]
.auto_out_b_bits_size (auto_buffer_out_b_bits_size_0), // @[Manager.scala:237:34]
.auto_out_b_bits_source (auto_buffer_out_b_bits_source_0), // @[Manager.scala:237:34]
.auto_out_b_bits_address (auto_buffer_out_b_bits_address_0), // @[Manager.scala:237:34]
.auto_out_b_bits_mask (auto_buffer_out_b_bits_mask_0), // @[Manager.scala:237:34]
.auto_out_b_bits_data (auto_buffer_out_b_bits_data_0), // @[Manager.scala:237:34]
.auto_out_b_bits_corrupt (auto_buffer_out_b_bits_corrupt_0), // @[Manager.scala:237:34]
.auto_out_c_ready (auto_buffer_out_c_ready_0), // @[Manager.scala:237:34]
.auto_out_c_valid (auto_buffer_out_c_valid_0),
.auto_out_c_bits_opcode (auto_buffer_out_c_bits_opcode_0),
.auto_out_c_bits_param (auto_buffer_out_c_bits_param_0),
.auto_out_c_bits_size (auto_buffer_out_c_bits_size_0),
.auto_out_c_bits_source (auto_buffer_out_c_bits_source_0),
.auto_out_c_bits_address (auto_buffer_out_c_bits_address_0),
.auto_out_c_bits_data (auto_buffer_out_c_bits_data_0),
.auto_out_c_bits_corrupt (auto_buffer_out_c_bits_corrupt_0),
.auto_out_d_ready (auto_buffer_out_d_ready_0),
.auto_out_d_valid (auto_buffer_out_d_valid_0), // @[Manager.scala:237:34]
.auto_out_d_bits_opcode (auto_buffer_out_d_bits_opcode_0), // @[Manager.scala:237:34]
.auto_out_d_bits_param (auto_buffer_out_d_bits_param_0), // @[Manager.scala:237:34]
.auto_out_d_bits_size (auto_buffer_out_d_bits_size_0), // @[Manager.scala:237:34]
.auto_out_d_bits_source (auto_buffer_out_d_bits_source_0), // @[Manager.scala:237:34]
.auto_out_d_bits_sink (auto_buffer_out_d_bits_sink_0), // @[Manager.scala:237:34]
.auto_out_d_bits_denied (auto_buffer_out_d_bits_denied_0), // @[Manager.scala:237:34]
.auto_out_d_bits_data (auto_buffer_out_d_bits_data_0), // @[Manager.scala:237:34]
.auto_out_d_bits_corrupt (auto_buffer_out_d_bits_corrupt_0), // @[Manager.scala:237:34]
.auto_out_e_ready (auto_buffer_out_e_ready_0), // @[Manager.scala:237:34]
.auto_out_e_valid (auto_buffer_out_e_valid_0),
.auto_out_e_bits_sink (auto_buffer_out_e_bits_sink_0)
); // @[Buffer.scala:75:28]
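  // Private data cache: its TileLink master side feeds the width widget (and from there the
  // crossbar), while its CPU-side request/response interface is arbitrated by dcacheArb.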
MiniDCache_4 dcache ( // @[Manager.scala:226:61]
.clock (clock),
.reset (reset),
.auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_out_a_valid (widget_auto_anon_in_a_valid),
.auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode),
.auto_out_a_bits_param (widget_auto_anon_in_a_bits_param),
.auto_out_a_bits_size (widget_auto_anon_in_a_bits_size),
.auto_out_a_bits_source (widget_auto_anon_in_a_bits_source),
.auto_out_a_bits_address (widget_auto_anon_in_a_bits_address),
.auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask),
.auto_out_a_bits_data (widget_auto_anon_in_a_bits_data),
.auto_out_b_ready (widget_auto_anon_in_b_ready),
.auto_out_b_valid (widget_auto_anon_in_b_valid), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_opcode (widget_auto_anon_in_b_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_param (widget_auto_anon_in_b_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_size (widget_auto_anon_in_b_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_source (widget_auto_anon_in_b_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_address (widget_auto_anon_in_b_bits_address), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_mask (widget_auto_anon_in_b_bits_mask), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_data (widget_auto_anon_in_b_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_b_bits_corrupt (widget_auto_anon_in_b_bits_corrupt), // @[WidthWidget.scala:27:9]
.auto_out_c_ready (widget_auto_anon_in_c_ready), // @[WidthWidget.scala:27:9]
.auto_out_c_valid (widget_auto_anon_in_c_valid),
.auto_out_c_bits_opcode (widget_auto_anon_in_c_bits_opcode),
.auto_out_c_bits_param (widget_auto_anon_in_c_bits_param),
.auto_out_c_bits_size (widget_auto_anon_in_c_bits_size),
.auto_out_c_bits_source (widget_auto_anon_in_c_bits_source),
.auto_out_c_bits_address (widget_auto_anon_in_c_bits_address),
.auto_out_c_bits_data (widget_auto_anon_in_c_bits_data),
.auto_out_d_ready (widget_auto_anon_in_d_ready),
.auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_source (widget_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9]
.auto_out_e_ready (widget_auto_anon_in_e_ready), // @[WidthWidget.scala:27:9]
.auto_out_e_valid (widget_auto_anon_in_e_valid),
.auto_out_e_bits_sink (widget_auto_anon_in_e_bits_sink),
.io_cpu_req_ready (_dcache_io_cpu_req_ready),
.io_cpu_req_valid (_dcacheArb_io_mem_req_valid), // @[Manager.scala:238:27]
.io_cpu_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), // @[Manager.scala:238:27]
.io_cpu_req_bits_tag (_dcacheArb_io_mem_req_bits_tag), // @[Manager.scala:238:27]
.io_cpu_req_bits_size (_dcacheArb_io_mem_req_bits_size), // @[Manager.scala:238:27]
.io_cpu_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), // @[Manager.scala:238:27]
.io_cpu_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), // @[Manager.scala:238:27]
.io_cpu_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), // @[Manager.scala:238:27]
.io_cpu_s1_kill (_dcacheArb_io_mem_s1_kill), // @[Manager.scala:238:27]
.io_cpu_s1_data_data (_dcacheArb_io_mem_s1_data_data), // @[Manager.scala:238:27]
.io_cpu_s1_data_mask (_dcacheArb_io_mem_s1_data_mask), // @[Manager.scala:238:27]
.io_cpu_s2_nack (_dcache_io_cpu_s2_nack),
.io_cpu_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw),
.io_cpu_s2_uncached (_dcache_io_cpu_s2_uncached),
.io_cpu_s2_paddr (_dcache_io_cpu_s2_paddr),
.io_cpu_resp_valid (_dcache_io_cpu_resp_valid),
.io_cpu_resp_bits_addr (_dcache_io_cpu_resp_bits_addr),
.io_cpu_resp_bits_tag (_dcache_io_cpu_resp_bits_tag),
.io_cpu_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd),
.io_cpu_resp_bits_size (_dcache_io_cpu_resp_bits_size),
.io_cpu_resp_bits_signed (_dcache_io_cpu_resp_bits_signed),
.io_cpu_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv),
.io_cpu_resp_bits_dv (_dcache_io_cpu_resp_bits_dv),
.io_cpu_resp_bits_data (_dcache_io_cpu_resp_bits_data),
.io_cpu_resp_bits_mask (_dcache_io_cpu_resp_bits_mask),
.io_cpu_resp_bits_replay (_dcache_io_cpu_resp_bits_replay),
.io_cpu_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data),
.io_cpu_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass),
.io_cpu_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw),
.io_cpu_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data),
.io_cpu_replay_next (_dcache_io_cpu_replay_next),
.io_cpu_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld),
.io_cpu_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st),
.io_cpu_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld),
.io_cpu_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st),
.io_cpu_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld),
.io_cpu_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st),
.io_cpu_s2_gpa (_dcache_io_cpu_s2_gpa),
.io_cpu_ordered (_dcache_io_cpu_ordered),
.io_cpu_store_pending (_dcache_io_cpu_store_pending),
.io_cpu_perf_acquire (_dcache_io_cpu_perf_acquire),
.io_cpu_perf_release (_dcache_io_cpu_perf_release),
.io_cpu_perf_grant (_dcache_io_cpu_perf_grant),
.io_cpu_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss),
.io_cpu_perf_blocked (_dcache_io_cpu_perf_blocked),
.io_cpu_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad),
.io_cpu_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW),
.io_cpu_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad),
.io_cpu_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad),
.io_cpu_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore),
.io_ptw_req_ready (_ptw_io_requestor_0_req_ready), // @[Manager.scala:243:21]
.io_ptw_req_valid (_dcache_io_ptw_req_valid),
.io_ptw_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr),
.io_ptw_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa),
.io_ptw_resp_valid (_ptw_io_requestor_0_resp_valid), // @[Manager.scala:243:21]
.io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), // @[Manager.scala:243:21]
.io_ptw_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), // @[Manager.scala:243:21]
.io_ptw_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), // @[Manager.scala:243:21]
.io_ptw_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), // @[Manager.scala:243:21]
.io_ptw_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), // @[Manager.scala:243:21]
.io_ptw_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), // @[Manager.scala:243:21]
.io_ptw_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), // @[Manager.scala:243:21]
.io_ptw_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), // @[Manager.scala:243:21]
.io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), // @[Manager.scala:243:21]
.io_ptw_ptbr_mode (_ptw_io_requestor_0_ptbr_mode), // @[Manager.scala:243:21]
.io_ptw_ptbr_asid (_ptw_io_requestor_0_ptbr_asid), // @[Manager.scala:243:21]
.io_ptw_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn), // @[Manager.scala:243:21]
.io_ptw_status_debug (_ptw_io_requestor_0_status_debug), // @[Manager.scala:243:21]
.io_ptw_status_cease (_ptw_io_requestor_0_status_cease), // @[Manager.scala:243:21]
.io_ptw_status_wfi (_ptw_io_requestor_0_status_wfi), // @[Manager.scala:243:21]
.io_ptw_status_isa (_ptw_io_requestor_0_status_isa), // @[Manager.scala:243:21]
.io_ptw_status_dprv (_ptw_io_requestor_0_status_dprv), // @[Manager.scala:243:21]
.io_ptw_status_dv (_ptw_io_requestor_0_status_dv), // @[Manager.scala:243:21]
.io_ptw_status_prv (_ptw_io_requestor_0_status_prv), // @[Manager.scala:243:21]
.io_ptw_status_v (_ptw_io_requestor_0_status_v), // @[Manager.scala:243:21]
.io_ptw_status_sd (_ptw_io_requestor_0_status_sd), // @[Manager.scala:243:21]
.io_ptw_status_zero2 (_ptw_io_requestor_0_status_zero2), // @[Manager.scala:243:21]
.io_ptw_status_mpv (_ptw_io_requestor_0_status_mpv), // @[Manager.scala:243:21]
.io_ptw_status_gva (_ptw_io_requestor_0_status_gva), // @[Manager.scala:243:21]
.io_ptw_status_mbe (_ptw_io_requestor_0_status_mbe), // @[Manager.scala:243:21]
.io_ptw_status_sbe (_ptw_io_requestor_0_status_sbe), // @[Manager.scala:243:21]
.io_ptw_status_sxl (_ptw_io_requestor_0_status_sxl), // @[Manager.scala:243:21]
.io_ptw_status_uxl (_ptw_io_requestor_0_status_uxl), // @[Manager.scala:243:21]
.io_ptw_status_sd_rv32 (_ptw_io_requestor_0_status_sd_rv32), // @[Manager.scala:243:21]
.io_ptw_status_zero1 (_ptw_io_requestor_0_status_zero1), // @[Manager.scala:243:21]
.io_ptw_status_tsr (_ptw_io_requestor_0_status_tsr), // @[Manager.scala:243:21]
.io_ptw_status_tw (_ptw_io_requestor_0_status_tw), // @[Manager.scala:243:21]
.io_ptw_status_tvm (_ptw_io_requestor_0_status_tvm), // @[Manager.scala:243:21]
.io_ptw_status_mxr (_ptw_io_requestor_0_status_mxr), // @[Manager.scala:243:21]
.io_ptw_status_sum (_ptw_io_requestor_0_status_sum), // @[Manager.scala:243:21]
.io_ptw_status_mprv (_ptw_io_requestor_0_status_mprv), // @[Manager.scala:243:21]
.io_ptw_status_xs (_ptw_io_requestor_0_status_xs), // @[Manager.scala:243:21]
.io_ptw_status_fs (_ptw_io_requestor_0_status_fs), // @[Manager.scala:243:21]
.io_ptw_status_mpp (_ptw_io_requestor_0_status_mpp), // @[Manager.scala:243:21]
.io_ptw_status_vs (_ptw_io_requestor_0_status_vs), // @[Manager.scala:243:21]
.io_ptw_status_spp (_ptw_io_requestor_0_status_spp), // @[Manager.scala:243:21]
.io_ptw_status_mpie (_ptw_io_requestor_0_status_mpie), // @[Manager.scala:243:21]
.io_ptw_status_ube (_ptw_io_requestor_0_status_ube), // @[Manager.scala:243:21]
.io_ptw_status_spie (_ptw_io_requestor_0_status_spie), // @[Manager.scala:243:21]
.io_ptw_status_upie (_ptw_io_requestor_0_status_upie), // @[Manager.scala:243:21]
.io_ptw_status_mie (_ptw_io_requestor_0_status_mie), // @[Manager.scala:243:21]
.io_ptw_status_hie (_ptw_io_requestor_0_status_hie), // @[Manager.scala:243:21]
.io_ptw_status_sie (_ptw_io_requestor_0_status_sie), // @[Manager.scala:243:21]
.io_ptw_status_uie (_ptw_io_requestor_0_status_uie) // @[Manager.scala:243:21]
); // @[Manager.scala:226:61]
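  // Manager control/status registers on the ctrl TileLink port; busy status mirrors the
  // accelerator's io_busy.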
ReRoCCManagerControl_4 ctrl ( // @[Manager.scala:235:24]
.clock (clock),
.reset (reset),
.auto_ctrl_in_a_ready (auto_ctrl_ctrl_in_a_ready_0),
.auto_ctrl_in_a_valid (auto_ctrl_ctrl_in_a_valid_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_opcode (auto_ctrl_ctrl_in_a_bits_opcode_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_param (auto_ctrl_ctrl_in_a_bits_param_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_size (auto_ctrl_ctrl_in_a_bits_size_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_source (auto_ctrl_ctrl_in_a_bits_source_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_address (auto_ctrl_ctrl_in_a_bits_address_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_mask (auto_ctrl_ctrl_in_a_bits_mask_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_data (auto_ctrl_ctrl_in_a_bits_data_0), // @[Manager.scala:237:34]
.auto_ctrl_in_a_bits_corrupt (auto_ctrl_ctrl_in_a_bits_corrupt_0), // @[Manager.scala:237:34]
.auto_ctrl_in_d_ready (auto_ctrl_ctrl_in_d_ready_0), // @[Manager.scala:237:34]
.auto_ctrl_in_d_valid (auto_ctrl_ctrl_in_d_valid_0),
.auto_ctrl_in_d_bits_opcode (auto_ctrl_ctrl_in_d_bits_opcode_0),
.auto_ctrl_in_d_bits_size (auto_ctrl_ctrl_in_d_bits_size_0),
.auto_ctrl_in_d_bits_source (auto_ctrl_ctrl_in_d_bits_source_0),
.auto_ctrl_in_d_bits_data (auto_ctrl_ctrl_in_d_bits_data_0),
.io_mgr_busy (_counter_io_busy), // @[RoCCFragments.scala:56:29]
.io_rocc_busy (_counter_io_busy) // @[RoCCFragments.scala:56:29]
); // @[Manager.scala:235:24]
HellaCacheArbiter_5 dcacheArb ( // @[Manager.scala:238:27]
.clock (clock),
.reset (reset),
.io_requestor_0_req_ready (_dcacheArb_io_requestor_0_req_ready),
.io_requestor_0_req_valid (_ptw_io_mem_req_valid), // @[Manager.scala:243:21]
.io_requestor_0_req_bits_addr (_ptw_io_mem_req_bits_addr), // @[Manager.scala:243:21]
.io_requestor_0_req_bits_dv (_ptw_io_mem_req_bits_dv), // @[Manager.scala:243:21]
.io_requestor_0_s1_kill (_ptw_io_mem_s1_kill), // @[Manager.scala:243:21]
.io_requestor_0_s2_nack (_dcacheArb_io_requestor_0_s2_nack),
.io_requestor_0_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw),
.io_requestor_0_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached),
.io_requestor_0_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr),
.io_requestor_0_resp_valid (_dcacheArb_io_requestor_0_resp_valid),
.io_requestor_0_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr),
.io_requestor_0_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag),
.io_requestor_0_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd),
.io_requestor_0_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size),
.io_requestor_0_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed),
.io_requestor_0_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv),
.io_requestor_0_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv),
.io_requestor_0_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data),
.io_requestor_0_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask),
.io_requestor_0_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay),
.io_requestor_0_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data),
.io_requestor_0_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass),
.io_requestor_0_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw),
.io_requestor_0_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data),
.io_requestor_0_replay_next (_dcacheArb_io_requestor_0_replay_next),
.io_requestor_0_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld),
.io_requestor_0_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st),
.io_requestor_0_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld),
.io_requestor_0_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st),
.io_requestor_0_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld),
.io_requestor_0_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st),
.io_requestor_0_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa),
.io_requestor_0_ordered (_dcacheArb_io_requestor_0_ordered),
.io_requestor_0_store_pending (_dcacheArb_io_requestor_0_store_pending),
.io_requestor_0_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire),
.io_requestor_0_perf_release (_dcacheArb_io_requestor_0_perf_release),
.io_requestor_0_perf_grant (_dcacheArb_io_requestor_0_perf_grant),
.io_requestor_0_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss),
.io_requestor_0_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked),
.io_requestor_0_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad),
.io_requestor_0_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW),
.io_requestor_0_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad),
.io_requestor_0_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad),
.io_requestor_0_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore),
.io_requestor_1_req_ready (_dcacheArb_io_requestor_1_req_ready),
.io_requestor_1_req_valid (_dcIF_io_cache_req_valid), // @[Manager.scala:255:22]
.io_requestor_1_s1_data_data (_dcIF_io_cache_s1_data_data), // @[Manager.scala:255:22]
.io_requestor_1_s1_data_mask (_dcIF_io_cache_s1_data_mask), // @[Manager.scala:255:22]
.io_requestor_1_s2_nack (_dcacheArb_io_requestor_1_s2_nack),
.io_requestor_1_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw),
.io_requestor_1_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached),
.io_requestor_1_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr),
.io_requestor_1_resp_valid (_dcacheArb_io_requestor_1_resp_valid),
.io_requestor_1_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr),
.io_requestor_1_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag),
.io_requestor_1_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd),
.io_requestor_1_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size),
.io_requestor_1_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed),
.io_requestor_1_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv),
.io_requestor_1_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv),
.io_requestor_1_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data),
.io_requestor_1_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask),
.io_requestor_1_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay),
.io_requestor_1_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data),
.io_requestor_1_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass),
.io_requestor_1_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw),
.io_requestor_1_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data),
.io_requestor_1_replay_next (_dcacheArb_io_requestor_1_replay_next),
.io_requestor_1_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld),
.io_requestor_1_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st),
.io_requestor_1_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld),
.io_requestor_1_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st),
.io_requestor_1_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld),
.io_requestor_1_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st),
.io_requestor_1_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa),
.io_requestor_1_ordered (_dcacheArb_io_requestor_1_ordered),
.io_requestor_1_store_pending (_dcacheArb_io_requestor_1_store_pending),
.io_requestor_1_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire),
.io_requestor_1_perf_release (_dcacheArb_io_requestor_1_perf_release),
.io_requestor_1_perf_grant (_dcacheArb_io_requestor_1_perf_grant),
.io_requestor_1_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss),
.io_requestor_1_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked),
.io_requestor_1_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad),
.io_requestor_1_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW),
.io_requestor_1_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad),
.io_requestor_1_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad),
.io_requestor_1_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore),
.io_mem_req_ready (_dcache_io_cpu_req_ready), // @[Manager.scala:226:61]
.io_mem_req_valid (_dcacheArb_io_mem_req_valid),
.io_mem_req_bits_addr (_dcacheArb_io_mem_req_bits_addr),
.io_mem_req_bits_tag (_dcacheArb_io_mem_req_bits_tag),
.io_mem_req_bits_size (_dcacheArb_io_mem_req_bits_size),
.io_mem_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv),
.io_mem_req_bits_dv (_dcacheArb_io_mem_req_bits_dv),
.io_mem_req_bits_phys (_dcacheArb_io_mem_req_bits_phys),
.io_mem_s1_kill (_dcacheArb_io_mem_s1_kill),
.io_mem_s1_data_data (_dcacheArb_io_mem_s1_data_data),
.io_mem_s1_data_mask (_dcacheArb_io_mem_s1_data_mask),
.io_mem_s2_nack (_dcache_io_cpu_s2_nack), // @[Manager.scala:226:61]
.io_mem_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw), // @[Manager.scala:226:61]
.io_mem_s2_uncached (_dcache_io_cpu_s2_uncached), // @[Manager.scala:226:61]
.io_mem_s2_paddr (_dcache_io_cpu_s2_paddr), // @[Manager.scala:226:61]
.io_mem_resp_valid (_dcache_io_cpu_resp_valid), // @[Manager.scala:226:61]
.io_mem_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), // @[Manager.scala:226:61]
.io_mem_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), // @[Manager.scala:226:61]
.io_mem_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), // @[Manager.scala:226:61]
.io_mem_resp_bits_size (_dcache_io_cpu_resp_bits_size), // @[Manager.scala:226:61]
.io_mem_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), // @[Manager.scala:226:61]
.io_mem_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), // @[Manager.scala:226:61]
.io_mem_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), // @[Manager.scala:226:61]
.io_mem_resp_bits_data (_dcache_io_cpu_resp_bits_data), // @[Manager.scala:226:61]
.io_mem_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), // @[Manager.scala:226:61]
.io_mem_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), // @[Manager.scala:226:61]
.io_mem_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), // @[Manager.scala:226:61]
.io_mem_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), // @[Manager.scala:226:61]
.io_mem_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), // @[Manager.scala:226:61]
.io_mem_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), // @[Manager.scala:226:61]
.io_mem_replay_next (_dcache_io_cpu_replay_next), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), // @[Manager.scala:226:61]
.io_mem_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), // @[Manager.scala:226:61]
.io_mem_s2_gpa (_dcache_io_cpu_s2_gpa), // @[Manager.scala:226:61]
.io_mem_ordered (_dcache_io_cpu_ordered), // @[Manager.scala:226:61]
.io_mem_store_pending (_dcache_io_cpu_store_pending), // @[Manager.scala:226:61]
.io_mem_perf_acquire (_dcache_io_cpu_perf_acquire), // @[Manager.scala:226:61]
.io_mem_perf_release (_dcache_io_cpu_perf_release), // @[Manager.scala:226:61]
.io_mem_perf_grant (_dcache_io_cpu_perf_grant), // @[Manager.scala:226:61]
.io_mem_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss), // @[Manager.scala:226:61]
.io_mem_perf_blocked (_dcache_io_cpu_perf_blocked), // @[Manager.scala:226:61]
.io_mem_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), // @[Manager.scala:226:61]
.io_mem_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), // @[Manager.scala:226:61]
.io_mem_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), // @[Manager.scala:226:61]
.io_mem_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), // @[Manager.scala:226:61]
.io_mem_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore) // @[Manager.scala:226:61]
); // @[Manager.scala:238:27]
PTW_5 ptw ( // @[Manager.scala:243:21]
.clock (clock),
.reset (reset),
.io_requestor_0_req_ready (_ptw_io_requestor_0_req_ready),
.io_requestor_0_req_valid (_dcache_io_ptw_req_valid), // @[Manager.scala:226:61]
.io_requestor_0_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), // @[Manager.scala:226:61]
.io_requestor_0_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), // @[Manager.scala:226:61]
.io_requestor_0_resp_valid (_ptw_io_requestor_0_resp_valid),
.io_requestor_0_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw),
.io_requestor_0_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final),
.io_requestor_0_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf),
.io_requestor_0_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf),
.io_requestor_0_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr),
.io_requestor_0_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw),
.io_requestor_0_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx),
.io_requestor_0_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future),
.io_requestor_0_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn),
.io_requestor_0_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software),
.io_requestor_0_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d),
.io_requestor_0_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a),
.io_requestor_0_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g),
.io_requestor_0_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u),
.io_requestor_0_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x),
.io_requestor_0_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w),
.io_requestor_0_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r),
.io_requestor_0_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v),
.io_requestor_0_resp_bits_level (_ptw_io_requestor_0_resp_bits_level),
.io_requestor_0_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous),
.io_requestor_0_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid),
.io_requestor_0_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits),
.io_requestor_0_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte),
.io_requestor_0_ptbr_mode (_ptw_io_requestor_0_ptbr_mode),
.io_requestor_0_ptbr_asid (_ptw_io_requestor_0_ptbr_asid),
.io_requestor_0_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn),
.io_requestor_0_status_debug (_ptw_io_requestor_0_status_debug),
.io_requestor_0_status_cease (_ptw_io_requestor_0_status_cease),
.io_requestor_0_status_wfi (_ptw_io_requestor_0_status_wfi),
.io_requestor_0_status_isa (_ptw_io_requestor_0_status_isa),
.io_requestor_0_status_dprv (_ptw_io_requestor_0_status_dprv),
.io_requestor_0_status_dv (_ptw_io_requestor_0_status_dv),
.io_requestor_0_status_prv (_ptw_io_requestor_0_status_prv),
.io_requestor_0_status_v (_ptw_io_requestor_0_status_v),
.io_requestor_0_status_sd (_ptw_io_requestor_0_status_sd),
.io_requestor_0_status_zero2 (_ptw_io_requestor_0_status_zero2),
.io_requestor_0_status_mpv (_ptw_io_requestor_0_status_mpv),
.io_requestor_0_status_gva (_ptw_io_requestor_0_status_gva),
.io_requestor_0_status_mbe (_ptw_io_requestor_0_status_mbe),
.io_requestor_0_status_sbe (_ptw_io_requestor_0_status_sbe),
.io_requestor_0_status_sxl (_ptw_io_requestor_0_status_sxl),
.io_requestor_0_status_uxl (_ptw_io_requestor_0_status_uxl),
.io_requestor_0_status_sd_rv32 (_ptw_io_requestor_0_status_sd_rv32),
.io_requestor_0_status_zero1 (_ptw_io_requestor_0_status_zero1),
.io_requestor_0_status_tsr (_ptw_io_requestor_0_status_tsr),
.io_requestor_0_status_tw (_ptw_io_requestor_0_status_tw),
.io_requestor_0_status_tvm (_ptw_io_requestor_0_status_tvm),
.io_requestor_0_status_mxr (_ptw_io_requestor_0_status_mxr),
.io_requestor_0_status_sum (_ptw_io_requestor_0_status_sum),
.io_requestor_0_status_mprv (_ptw_io_requestor_0_status_mprv),
.io_requestor_0_status_xs (_ptw_io_requestor_0_status_xs),
.io_requestor_0_status_fs (_ptw_io_requestor_0_status_fs),
.io_requestor_0_status_mpp (_ptw_io_requestor_0_status_mpp),
.io_requestor_0_status_vs (_ptw_io_requestor_0_status_vs),
.io_requestor_0_status_spp (_ptw_io_requestor_0_status_spp),
.io_requestor_0_status_mpie (_ptw_io_requestor_0_status_mpie),
.io_requestor_0_status_ube (_ptw_io_requestor_0_status_ube),
.io_requestor_0_status_spie (_ptw_io_requestor_0_status_spie),
.io_requestor_0_status_upie (_ptw_io_requestor_0_status_upie),
.io_requestor_0_status_mie (_ptw_io_requestor_0_status_mie),
.io_requestor_0_status_hie (_ptw_io_requestor_0_status_hie),
.io_requestor_0_status_sie (_ptw_io_requestor_0_status_sie),
.io_requestor_0_status_uie (_ptw_io_requestor_0_status_uie),
.io_mem_req_ready (_dcacheArb_io_requestor_0_req_ready), // @[Manager.scala:238:27]
.io_mem_req_valid (_ptw_io_mem_req_valid),
.io_mem_req_bits_addr (_ptw_io_mem_req_bits_addr),
.io_mem_req_bits_dv (_ptw_io_mem_req_bits_dv),
.io_mem_s1_kill (_ptw_io_mem_s1_kill),
.io_mem_s2_nack (_dcacheArb_io_requestor_0_s2_nack), // @[Manager.scala:238:27]
.io_mem_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw), // @[Manager.scala:238:27]
.io_mem_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), // @[Manager.scala:238:27]
.io_mem_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), // @[Manager.scala:238:27]
.io_mem_resp_valid (_dcacheArb_io_requestor_0_resp_valid), // @[Manager.scala:238:27]
.io_mem_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), // @[Manager.scala:238:27]
.io_mem_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), // @[Manager.scala:238:27]
.io_mem_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), // @[Manager.scala:238:27]
.io_mem_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), // @[Manager.scala:238:27]
.io_mem_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), // @[Manager.scala:238:27]
.io_mem_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), // @[Manager.scala:238:27]
.io_mem_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), // @[Manager.scala:238:27]
.io_mem_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), // @[Manager.scala:238:27]
.io_mem_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), // @[Manager.scala:238:27]
.io_mem_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), // @[Manager.scala:238:27]
.io_mem_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), // @[Manager.scala:238:27]
.io_mem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), // @[Manager.scala:238:27]
.io_mem_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), // @[Manager.scala:238:27]
.io_mem_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), // @[Manager.scala:238:27]
.io_mem_replay_next (_dcacheArb_io_requestor_0_replay_next), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), // @[Manager.scala:238:27]
.io_mem_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), // @[Manager.scala:238:27]
.io_mem_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), // @[Manager.scala:238:27]
.io_mem_ordered (_dcacheArb_io_requestor_0_ordered), // @[Manager.scala:238:27]
.io_mem_store_pending (_dcacheArb_io_requestor_0_store_pending), // @[Manager.scala:238:27]
.io_mem_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), // @[Manager.scala:238:27]
.io_mem_perf_release (_dcacheArb_io_requestor_0_perf_release), // @[Manager.scala:238:27]
.io_mem_perf_grant (_dcacheArb_io_requestor_0_perf_grant), // @[Manager.scala:238:27]
.io_mem_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss), // @[Manager.scala:238:27]
.io_mem_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), // @[Manager.scala:238:27]
.io_mem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), // @[Manager.scala:238:27]
.io_mem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), // @[Manager.scala:238:27]
.io_mem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), // @[Manager.scala:238:27]
.io_mem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), // @[Manager.scala:238:27]
.io_mem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), // @[Manager.scala:238:27]
.io_dpath_ptbr_mode (_rerocc_manager_io_ptw_ptbr_mode), // @[Manager.scala:209:34]
.io_dpath_ptbr_asid (_rerocc_manager_io_ptw_ptbr_asid), // @[Manager.scala:209:34]
.io_dpath_ptbr_ppn (_rerocc_manager_io_ptw_ptbr_ppn), // @[Manager.scala:209:34]
.io_dpath_sfence_valid (_rerocc_manager_io_ptw_sfence_valid), // @[Manager.scala:209:34]
.io_dpath_status_debug (_rerocc_manager_io_ptw_status_debug), // @[Manager.scala:209:34]
.io_dpath_status_cease (_rerocc_manager_io_ptw_status_cease), // @[Manager.scala:209:34]
.io_dpath_status_wfi (_rerocc_manager_io_ptw_status_wfi), // @[Manager.scala:209:34]
.io_dpath_status_isa (_rerocc_manager_io_ptw_status_isa), // @[Manager.scala:209:34]
.io_dpath_status_dprv (_rerocc_manager_io_ptw_status_dprv), // @[Manager.scala:209:34]
.io_dpath_status_dv (_rerocc_manager_io_ptw_status_dv), // @[Manager.scala:209:34]
.io_dpath_status_prv (_rerocc_manager_io_ptw_status_prv), // @[Manager.scala:209:34]
.io_dpath_status_v (_rerocc_manager_io_ptw_status_v), // @[Manager.scala:209:34]
.io_dpath_status_sd (_rerocc_manager_io_ptw_status_sd), // @[Manager.scala:209:34]
.io_dpath_status_zero2 (_rerocc_manager_io_ptw_status_zero2), // @[Manager.scala:209:34]
.io_dpath_status_mpv (_rerocc_manager_io_ptw_status_mpv), // @[Manager.scala:209:34]
.io_dpath_status_gva (_rerocc_manager_io_ptw_status_gva), // @[Manager.scala:209:34]
.io_dpath_status_mbe (_rerocc_manager_io_ptw_status_mbe), // @[Manager.scala:209:34]
.io_dpath_status_sbe (_rerocc_manager_io_ptw_status_sbe), // @[Manager.scala:209:34]
.io_dpath_status_sxl (_rerocc_manager_io_ptw_status_sxl), // @[Manager.scala:209:34]
.io_dpath_status_uxl (_rerocc_manager_io_ptw_status_uxl), // @[Manager.scala:209:34]
.io_dpath_status_sd_rv32 (_rerocc_manager_io_ptw_status_sd_rv32), // @[Manager.scala:209:34]
.io_dpath_status_zero1 (_rerocc_manager_io_ptw_status_zero1), // @[Manager.scala:209:34]
.io_dpath_status_tsr (_rerocc_manager_io_ptw_status_tsr), // @[Manager.scala:209:34]
.io_dpath_status_tw (_rerocc_manager_io_ptw_status_tw), // @[Manager.scala:209:34]
.io_dpath_status_tvm (_rerocc_manager_io_ptw_status_tvm), // @[Manager.scala:209:34]
.io_dpath_status_mxr (_rerocc_manager_io_ptw_status_mxr), // @[Manager.scala:209:34]
.io_dpath_status_sum (_rerocc_manager_io_ptw_status_sum), // @[Manager.scala:209:34]
.io_dpath_status_mprv (_rerocc_manager_io_ptw_status_mprv), // @[Manager.scala:209:34]
.io_dpath_status_xs (_rerocc_manager_io_ptw_status_xs), // @[Manager.scala:209:34]
.io_dpath_status_fs (_rerocc_manager_io_ptw_status_fs), // @[Manager.scala:209:34]
.io_dpath_status_mpp (_rerocc_manager_io_ptw_status_mpp), // @[Manager.scala:209:34]
.io_dpath_status_vs (_rerocc_manager_io_ptw_status_vs), // @[Manager.scala:209:34]
.io_dpath_status_spp (_rerocc_manager_io_ptw_status_spp), // @[Manager.scala:209:34]
.io_dpath_status_mpie (_rerocc_manager_io_ptw_status_mpie), // @[Manager.scala:209:34]
.io_dpath_status_ube (_rerocc_manager_io_ptw_status_ube), // @[Manager.scala:209:34]
.io_dpath_status_spie (_rerocc_manager_io_ptw_status_spie), // @[Manager.scala:209:34]
.io_dpath_status_upie (_rerocc_manager_io_ptw_status_upie), // @[Manager.scala:209:34]
.io_dpath_status_mie (_rerocc_manager_io_ptw_status_mie), // @[Manager.scala:209:34]
.io_dpath_status_hie (_rerocc_manager_io_ptw_status_hie), // @[Manager.scala:209:34]
.io_dpath_status_sie (_rerocc_manager_io_ptw_status_sie), // @[Manager.scala:209:34]
.io_dpath_status_uie (_rerocc_manager_io_ptw_status_uie), // @[Manager.scala:209:34]
.io_dpath_perf_pte_miss (_ptw_io_dpath_perf_pte_miss),
.io_dpath_clock_enabled (_ptw_io_dpath_clock_enabled)
); // @[Manager.scala:243:21]
SimpleHellaCacheIF_5 dcIF ( // @[Manager.scala:255:22]
.clock (clock),
.reset (reset),
.io_requestor_req_ready (_dcIF_io_requestor_req_ready),
.io_requestor_resp_valid (_dcIF_io_requestor_resp_valid),
.io_requestor_resp_bits_addr (_dcIF_io_requestor_resp_bits_addr),
.io_requestor_resp_bits_tag (_dcIF_io_requestor_resp_bits_tag),
.io_requestor_resp_bits_cmd (_dcIF_io_requestor_resp_bits_cmd),
.io_requestor_resp_bits_size (_dcIF_io_requestor_resp_bits_size),
.io_requestor_resp_bits_signed (_dcIF_io_requestor_resp_bits_signed),
.io_requestor_resp_bits_dprv (_dcIF_io_requestor_resp_bits_dprv),
.io_requestor_resp_bits_dv (_dcIF_io_requestor_resp_bits_dv),
.io_requestor_resp_bits_data (_dcIF_io_requestor_resp_bits_data),
.io_requestor_resp_bits_mask (_dcIF_io_requestor_resp_bits_mask),
.io_requestor_resp_bits_replay (_dcIF_io_requestor_resp_bits_replay),
.io_requestor_resp_bits_has_data (_dcIF_io_requestor_resp_bits_has_data),
.io_requestor_resp_bits_data_word_bypass (_dcIF_io_requestor_resp_bits_data_word_bypass),
.io_requestor_resp_bits_data_raw (_dcIF_io_requestor_resp_bits_data_raw),
.io_requestor_resp_bits_store_data (_dcIF_io_requestor_resp_bits_store_data),
.io_cache_req_ready (_dcacheArb_io_requestor_1_req_ready), // @[Manager.scala:238:27]
.io_cache_req_valid (_dcIF_io_cache_req_valid),
.io_cache_s1_data_data (_dcIF_io_cache_s1_data_data),
.io_cache_s1_data_mask (_dcIF_io_cache_s1_data_mask),
.io_cache_s2_nack (_dcacheArb_io_requestor_1_s2_nack), // @[Manager.scala:238:27]
.io_cache_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), // @[Manager.scala:238:27]
.io_cache_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), // @[Manager.scala:238:27]
.io_cache_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), // @[Manager.scala:238:27]
.io_cache_resp_valid (_dcacheArb_io_requestor_1_resp_valid), // @[Manager.scala:238:27]
.io_cache_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), // @[Manager.scala:238:27]
.io_cache_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), // @[Manager.scala:238:27]
.io_cache_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), // @[Manager.scala:238:27]
.io_cache_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), // @[Manager.scala:238:27]
.io_cache_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), // @[Manager.scala:238:27]
.io_cache_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), // @[Manager.scala:238:27]
.io_cache_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), // @[Manager.scala:238:27]
.io_cache_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), // @[Manager.scala:238:27]
.io_cache_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), // @[Manager.scala:238:27]
.io_cache_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), // @[Manager.scala:238:27]
.io_cache_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), // @[Manager.scala:238:27]
.io_cache_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), // @[Manager.scala:238:27]
.io_cache_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), // @[Manager.scala:238:27]
.io_cache_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), // @[Manager.scala:238:27]
.io_cache_replay_next (_dcacheArb_io_requestor_1_replay_next), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), // @[Manager.scala:238:27]
.io_cache_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), // @[Manager.scala:238:27]
.io_cache_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), // @[Manager.scala:238:27]
.io_cache_ordered (_dcacheArb_io_requestor_1_ordered), // @[Manager.scala:238:27]
.io_cache_store_pending (_dcacheArb_io_requestor_1_store_pending), // @[Manager.scala:238:27]
.io_cache_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), // @[Manager.scala:238:27]
.io_cache_perf_release (_dcacheArb_io_requestor_1_perf_release), // @[Manager.scala:238:27]
.io_cache_perf_grant (_dcacheArb_io_requestor_1_perf_grant), // @[Manager.scala:238:27]
.io_cache_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss), // @[Manager.scala:238:27]
.io_cache_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), // @[Manager.scala:238:27]
.io_cache_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), // @[Manager.scala:238:27]
.io_cache_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), // @[Manager.scala:238:27]
.io_cache_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), // @[Manager.scala:238:27]
.io_cache_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), // @[Manager.scala:238:27]
.io_cache_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore) // @[Manager.scala:238:27]
); // @[Manager.scala:255:22]
assign auto_ctrl_ctrl_in_a_ready = auto_ctrl_ctrl_in_a_ready_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_valid = auto_ctrl_ctrl_in_d_valid_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_opcode = auto_ctrl_ctrl_in_d_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_size = auto_ctrl_ctrl_in_d_bits_size_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_source = auto_ctrl_ctrl_in_d_bits_source_0; // @[Manager.scala:237:34]
assign auto_ctrl_ctrl_in_d_bits_data = auto_ctrl_ctrl_in_d_bits_data_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[Manager.scala:237:34]
assign auto_buffer_out_a_bits_corrupt = auto_buffer_out_a_bits_corrupt_0; // @[Manager.scala:237:34]
assign auto_buffer_out_b_ready = auto_buffer_out_b_ready_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_valid = auto_buffer_out_c_valid_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_opcode = auto_buffer_out_c_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_param = auto_buffer_out_c_bits_param_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_size = auto_buffer_out_c_bits_size_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_source = auto_buffer_out_c_bits_source_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_address = auto_buffer_out_c_bits_address_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_data = auto_buffer_out_c_bits_data_0; // @[Manager.scala:237:34]
assign auto_buffer_out_c_bits_corrupt = auto_buffer_out_c_bits_corrupt_0; // @[Manager.scala:237:34]
assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[Manager.scala:237:34]
assign auto_buffer_out_e_valid = auto_buffer_out_e_valid_0; // @[Manager.scala:237:34]
assign auto_buffer_out_e_bits_sink = auto_buffer_out_e_bits_sink_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_req_ready = auto_re_ro_cc_in_req_ready_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_valid = auto_re_ro_cc_in_resp_valid_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_opcode = auto_re_ro_cc_in_resp_bits_opcode_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_client_id = auto_re_ro_cc_in_resp_bits_client_id_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_manager_id = auto_re_ro_cc_in_resp_bits_manager_id_0; // @[Manager.scala:237:34]
assign auto_re_ro_cc_in_resp_bits_data = auto_re_ro_cc_in_resp_bits_data_0; // @[Manager.scala:237:34]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PMA.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.devices.debug.DebugModuleKey
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile.{CoreModule, CoreBundle}
import freechips.rocketchip.tilelink.{TLSlavePortParameters, TLManagerParameters}
class PMAChecker(manager: TLSlavePortParameters)(implicit p: Parameters) extends CoreModule()(p) {
val io = IO(new Bundle {
val paddr = Input(UInt())
val resp = Output(new Bundle {
val cacheable = Bool()
val r = Bool()
val w = Bool()
val pp = Bool()
val al = Bool()
val aa = Bool()
val x = Bool()
val eff = Bool()
})
})
// PMA
  // Check whether at least one slave exists that can consume this address.
val legal_address = manager.findSafe(io.paddr).reduce(_||_)
  // Helper used below to check a single SoC/slave property at this address.
def fastCheck(member: TLManagerParameters => Boolean) =
legal_address && manager.fastProperty(io.paddr, member, (b:Boolean) => b.B)
io.resp.cacheable := fastCheck(_.supportsAcquireB)
io.resp.r := fastCheck(_.supportsGet)
io.resp.w := fastCheck(_.supportsPutFull)
io.resp.pp := fastCheck(_.supportsPutPartial)
io.resp.al := fastCheck(_.supportsLogical)
io.resp.aa := fastCheck(_.supportsArithmetic)
io.resp.x := fastCheck(_.executable)
io.resp.eff := fastCheck(Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains _.regionType)
}
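// Illustrative sketch (not part of the original PMA.scala): a hypothetical wrapper that
// classifies one physical address with a PMAChecker. The TLSlavePortParameters value
// ('manager') and the 'paddrBits' width are assumed to come from the enclosing design.
class PMAClassifierSketch(manager: TLSlavePortParameters)(implicit p: Parameters) extends CoreModule()(p) {
  val io = IO(new Bundle {
    val paddr = Input(UInt(paddrBits.W))
    val uncacheable = Output(Bool())
    val sideEffecting = Output(Bool())
  })
  val pma = Module(new PMAChecker(manager))
  pma.io.paddr := io.paddr
  io.uncacheable := !pma.io.resp.cacheable // no slave backing this address supports AcquireB
  io.sideEffecting := pma.io.resp.eff // address falls in a PUT_EFFECTS / GET_EFFECTS region
}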
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
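// Illustrative sketch (not part of the original file): the Ordered instance above makes
// earlier entries of 'cases' compare as strictly greater, i.e. "more relaxed than".
private object RegionTypeOrderingSketch {
  require(RegionType.CACHED > RegionType.UNCACHED)      // CACHED is more relaxed than UNCACHED
  require(RegionType.VOLATILE > RegionType.GET_EFFECTS) // GET_EFFECTS is the most restrictive
}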
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
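// Illustrative sketch (not part of the original file): two assumed id ranges and how
// overlap/containment behave; the companion's overlaps reports a clashing pair, if any.
private object IdRangeSketch {
  val a = IdRange(0, 4) // ids 0..3
  val b = IdRange(2, 6) // ids 2..5
  require(a.overlaps(b) && !a.contains(b))       // they share ids 2..3, but neither contains the other
  require(IdRange.overlaps(Seq(a, b)).isDefined) // the companion flags the overlapping pair
}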
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
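// Illustrative sketch (not part of the original file): combining two assumed size ranges.
private object TransferSizesSketch {
  val x = TransferSizes(4, 64)
  val y = TransferSizes(16, 256)
  require(x.intersect(y) == TransferSizes(16, 64)) // sizes both endpoints support
  require(x.mincover(y) == TransferSizes(4, 256))  // smallest range covering both
  require(x.contains(32) && !x.contains(3))        // members must be powers of two within [min, max]
}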
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g.: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g.: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
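// Illustrative sketch (not part of the original file): the base/mask encoding for an
// assumed non-contiguous device and a simple contiguous one.
private object AddressSetSketch {
  val dev = AddressSet(0x1000, 0xf0f) // 0x1000-0x100f, 0x1100-0x110f, ..., 0x1f00-0x1f0f
  require(dev.contains(BigInt(0x1105)) && !dev.contains(BigInt(0x1010)))
  require(dev.alignment == 0x10 && !dev.contiguous) // 16-byte-aligned fragments, with holes
  require(AddressSet(0x200, 0xff).contiguous)       // a plain 256-byte window
}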
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
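// Illustrative sketch (not part of the original file): covering an assumed misaligned
// 12 KiB region with naturally aligned power-of-two sets.
private object AddressSetMisalignedSketch {
  require(AddressSet.misaligned(0x1000, 0x3000) ==
    Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)))
}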
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
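// Illustrative sketch (not part of the original file): what the presets above imply.
private object BufferParamsSketch {
  require(BufferParams.default.latency == 1) // 2-deep, neither flow nor pipe: adds a cycle
  require(BufferParams.flow.latency == 0)    // flow-through: no added latency
  require(!BufferParams.none.isDefined)      // depth 0: the channel is passed straight through
}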
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
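// Illustrative sketch (not part of the original file): a set value overrides, unset keeps.
private object TriStateValueSketch {
  require(TriStateValue(true).update(orig = false))  // explicitly set => the value wins
  require(!TriStateValue.unset.update(orig = false)) // unset => the original is kept
}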
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
| module PMAChecker_18( // @[PMA.scala:18:7]
input clock, // @[PMA.scala:18:7]
input reset, // @[PMA.scala:18:7]
input [39:0] io_paddr, // @[PMA.scala:19:14]
output io_resp_cacheable, // @[PMA.scala:19:14]
output io_resp_r, // @[PMA.scala:19:14]
output io_resp_w, // @[PMA.scala:19:14]
output io_resp_pp, // @[PMA.scala:19:14]
output io_resp_al, // @[PMA.scala:19:14]
output io_resp_aa, // @[PMA.scala:19:14]
output io_resp_x, // @[PMA.scala:19:14]
output io_resp_eff // @[PMA.scala:19:14]
);
wire [39:0] io_paddr_0 = io_paddr; // @[PMA.scala:18:7]
wire [40:0] _io_resp_r_T_2 = 41'h0; // @[Parameters.scala:137:46]
wire [40:0] _io_resp_r_T_3 = 41'h0; // @[Parameters.scala:137:46]
wire _io_resp_r_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire _io_resp_cacheable_T_28 = 1'h0; // @[Mux.scala:30:73]
wire _io_resp_w_T_47 = 1'h0; // @[Mux.scala:30:73]
wire _io_resp_pp_T_47 = 1'h0; // @[Mux.scala:30:73]
wire _io_resp_al_T_47 = 1'h0; // @[Mux.scala:30:73]
wire _io_resp_aa_T_47 = 1'h0; // @[Mux.scala:30:73]
wire _io_resp_x_T_65 = 1'h0; // @[Mux.scala:30:73]
wire _io_resp_eff_T_59 = 1'h0; // @[Mux.scala:30:73]
wire [39:0] _legal_address_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_cacheable_T = io_paddr_0; // @[PMA.scala:18:7]
wire _io_resp_cacheable_T_31; // @[PMA.scala:39:19]
wire [39:0] _io_resp_r_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_w_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_pp_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_al_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_aa_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_x_T = io_paddr_0; // @[PMA.scala:18:7]
wire [39:0] _io_resp_eff_T = io_paddr_0; // @[PMA.scala:18:7]
wire _io_resp_r_T_5; // @[PMA.scala:39:19]
wire _io_resp_w_T_49; // @[PMA.scala:39:19]
wire _io_resp_pp_T_49; // @[PMA.scala:39:19]
wire _io_resp_al_T_49; // @[PMA.scala:39:19]
wire _io_resp_aa_T_49; // @[PMA.scala:39:19]
wire _io_resp_x_T_67; // @[PMA.scala:39:19]
wire _io_resp_eff_T_61; // @[PMA.scala:39:19]
wire io_resp_cacheable_0; // @[PMA.scala:18:7]
wire io_resp_r_0; // @[PMA.scala:18:7]
wire io_resp_w_0; // @[PMA.scala:18:7]
wire io_resp_pp_0; // @[PMA.scala:18:7]
wire io_resp_al_0; // @[PMA.scala:18:7]
wire io_resp_aa_0; // @[PMA.scala:18:7]
wire io_resp_x_0; // @[PMA.scala:18:7]
wire io_resp_eff_0; // @[PMA.scala:18:7]
wire [40:0] _legal_address_T_1 = {1'h0, _legal_address_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_2 = _legal_address_T_1 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_3 = _legal_address_T_2; // @[Parameters.scala:137:46]
wire _legal_address_T_4 = _legal_address_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_0 = _legal_address_T_4; // @[Parameters.scala:612:40]
wire [39:0] _GEN = {io_paddr_0[39:13], io_paddr_0[12:0] ^ 13'h1000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_5; // @[Parameters.scala:137:31]
assign _legal_address_T_5 = _GEN; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_29; // @[Parameters.scala:137:31]
assign _io_resp_x_T_29 = _GEN; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_6 = {1'h0, _legal_address_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_7 = _legal_address_T_6 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_8 = _legal_address_T_7; // @[Parameters.scala:137:46]
wire _legal_address_T_9 = _legal_address_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_1 = _legal_address_T_9; // @[Parameters.scala:612:40]
wire [39:0] _GEN_0 = {io_paddr_0[39:14], io_paddr_0[13:0] ^ 14'h3000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_10; // @[Parameters.scala:137:31]
assign _legal_address_T_10 = _GEN_0; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_5; // @[Parameters.scala:137:31]
assign _io_resp_x_T_5 = _GEN_0; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_35; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_35 = _GEN_0; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_11 = {1'h0, _legal_address_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_12 = _legal_address_T_11 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_13 = _legal_address_T_12; // @[Parameters.scala:137:46]
wire _legal_address_T_14 = _legal_address_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_2 = _legal_address_T_14; // @[Parameters.scala:612:40]
wire [39:0] _GEN_1 = {io_paddr_0[39:17], io_paddr_0[16:0] ^ 17'h10000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_15; // @[Parameters.scala:137:31]
assign _legal_address_T_15 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_cacheable_T_5; // @[Parameters.scala:137:31]
assign _io_resp_cacheable_T_5 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_w_T_41; // @[Parameters.scala:137:31]
assign _io_resp_w_T_41 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_41; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_41 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_41; // @[Parameters.scala:137:31]
assign _io_resp_al_T_41 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_41; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_41 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_10; // @[Parameters.scala:137:31]
assign _io_resp_x_T_10 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_40; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_40 = _GEN_1; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_16 = {1'h0, _legal_address_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_17 = _legal_address_T_16 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_18 = _legal_address_T_17; // @[Parameters.scala:137:46]
wire _legal_address_T_19 = _legal_address_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_3 = _legal_address_T_19; // @[Parameters.scala:612:40]
wire [39:0] _GEN_2 = {io_paddr_0[39:21], io_paddr_0[20:0] ^ 21'h100000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_20; // @[Parameters.scala:137:31]
assign _legal_address_T_20 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_w_T_5; // @[Parameters.scala:137:31]
assign _io_resp_w_T_5 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_5; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_5 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_5; // @[Parameters.scala:137:31]
assign _io_resp_al_T_5 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_5; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_5 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_34; // @[Parameters.scala:137:31]
assign _io_resp_x_T_34 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_5; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_5 = _GEN_2; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_21 = {1'h0, _legal_address_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_22 = _legal_address_T_21 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_23 = _legal_address_T_22; // @[Parameters.scala:137:46]
wire _legal_address_T_24 = _legal_address_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_4 = _legal_address_T_24; // @[Parameters.scala:612:40]
wire [39:0] _legal_address_T_25 = {io_paddr_0[39:21], io_paddr_0[20:0] ^ 21'h110000}; // @[PMA.scala:18:7]
wire [40:0] _legal_address_T_26 = {1'h0, _legal_address_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_27 = _legal_address_T_26 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_28 = _legal_address_T_27; // @[Parameters.scala:137:46]
wire _legal_address_T_29 = _legal_address_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_5 = _legal_address_T_29; // @[Parameters.scala:612:40]
wire [39:0] _GEN_3 = {io_paddr_0[39:26], io_paddr_0[25:0] ^ 26'h2000000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_30; // @[Parameters.scala:137:31]
assign _legal_address_T_30 = _GEN_3; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_39; // @[Parameters.scala:137:31]
assign _io_resp_x_T_39 = _GEN_3; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_10; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_10 = _GEN_3; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_31 = {1'h0, _legal_address_T_30}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_32 = _legal_address_T_31 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_33 = _legal_address_T_32; // @[Parameters.scala:137:46]
wire _legal_address_T_34 = _legal_address_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_6 = _legal_address_T_34; // @[Parameters.scala:612:40]
wire [39:0] _GEN_4 = {io_paddr_0[39:26], io_paddr_0[25:0] ^ 26'h2010000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_35; // @[Parameters.scala:137:31]
assign _legal_address_T_35 = _GEN_4; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_w_T_10; // @[Parameters.scala:137:31]
assign _io_resp_w_T_10 = _GEN_4; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_10; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_10 = _GEN_4; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_10; // @[Parameters.scala:137:31]
assign _io_resp_al_T_10 = _GEN_4; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_10; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_10 = _GEN_4; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_44; // @[Parameters.scala:137:31]
assign _io_resp_x_T_44 = _GEN_4; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_15; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_15 = _GEN_4; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_36 = {1'h0, _legal_address_T_35}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_37 = _legal_address_T_36 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_38 = _legal_address_T_37; // @[Parameters.scala:137:46]
wire _legal_address_T_39 = _legal_address_T_38 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_7 = _legal_address_T_39; // @[Parameters.scala:612:40]
wire [39:0] _GEN_5 = {io_paddr_0[39:28], io_paddr_0[27:0] ^ 28'h8000000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_40; // @[Parameters.scala:137:31]
assign _legal_address_T_40 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_cacheable_T_17; // @[Parameters.scala:137:31]
assign _io_resp_cacheable_T_17 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_w_T_15; // @[Parameters.scala:137:31]
assign _io_resp_w_T_15 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_w_T_20; // @[Parameters.scala:137:31]
assign _io_resp_w_T_20 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_15; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_15 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_20; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_20 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_15; // @[Parameters.scala:137:31]
assign _io_resp_al_T_15 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_20; // @[Parameters.scala:137:31]
assign _io_resp_al_T_20 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_15; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_15 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_20; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_20 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_15; // @[Parameters.scala:137:31]
assign _io_resp_x_T_15 = _GEN_5; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_45; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_45 = _GEN_5; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_41 = {1'h0, _legal_address_T_40}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_42 = _legal_address_T_41 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_43 = _legal_address_T_42; // @[Parameters.scala:137:46]
wire _legal_address_T_44 = _legal_address_T_43 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_8 = _legal_address_T_44; // @[Parameters.scala:612:40]
wire [39:0] _GEN_6 = {io_paddr_0[39:28], io_paddr_0[27:0] ^ 28'hC000000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_45; // @[Parameters.scala:137:31]
assign _legal_address_T_45 = _GEN_6; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_cacheable_T_10; // @[Parameters.scala:137:31]
assign _io_resp_cacheable_T_10 = _GEN_6; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_49; // @[Parameters.scala:137:31]
assign _io_resp_x_T_49 = _GEN_6; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_20; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_20 = _GEN_6; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_46 = {1'h0, _legal_address_T_45}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_47 = _legal_address_T_46 & 41'h1FFFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_48 = _legal_address_T_47; // @[Parameters.scala:137:46]
wire _legal_address_T_49 = _legal_address_T_48 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_9 = _legal_address_T_49; // @[Parameters.scala:612:40]
wire [39:0] _legal_address_T_50 = {io_paddr_0[39:29], io_paddr_0[28:0] ^ 29'h10020000}; // @[PMA.scala:18:7]
wire [40:0] _legal_address_T_51 = {1'h0, _legal_address_T_50}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_52 = _legal_address_T_51 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_53 = _legal_address_T_52; // @[Parameters.scala:137:46]
wire _legal_address_T_54 = _legal_address_T_53 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_10 = _legal_address_T_54; // @[Parameters.scala:612:40]
wire [39:0] _GEN_7 = {io_paddr_0[39:32], io_paddr_0[31:0] ^ 32'h80000000}; // @[PMA.scala:18:7]
wire [39:0] _legal_address_T_55; // @[Parameters.scala:137:31]
assign _legal_address_T_55 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_cacheable_T_22; // @[Parameters.scala:137:31]
assign _io_resp_cacheable_T_22 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_w_T_30; // @[Parameters.scala:137:31]
assign _io_resp_w_T_30 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_30; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_30 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_30; // @[Parameters.scala:137:31]
assign _io_resp_al_T_30 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_30; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_30 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_20; // @[Parameters.scala:137:31]
assign _io_resp_x_T_20 = _GEN_7; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_50; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_50 = _GEN_7; // @[Parameters.scala:137:31]
wire [40:0] _legal_address_T_56 = {1'h0, _legal_address_T_55}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _legal_address_T_57 = _legal_address_T_56 & 41'h1FFF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _legal_address_T_58 = _legal_address_T_57; // @[Parameters.scala:137:46]
wire _legal_address_T_59 = _legal_address_T_58 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _legal_address_WIRE_11 = _legal_address_T_59; // @[Parameters.scala:612:40]
wire _legal_address_T_60 = _legal_address_WIRE_0 | _legal_address_WIRE_1; // @[Parameters.scala:612:40]
wire _legal_address_T_61 = _legal_address_T_60 | _legal_address_WIRE_2; // @[Parameters.scala:612:40]
wire _legal_address_T_62 = _legal_address_T_61 | _legal_address_WIRE_3; // @[Parameters.scala:612:40]
wire _legal_address_T_63 = _legal_address_T_62 | _legal_address_WIRE_4; // @[Parameters.scala:612:40]
wire _legal_address_T_64 = _legal_address_T_63 | _legal_address_WIRE_5; // @[Parameters.scala:612:40]
wire _legal_address_T_65 = _legal_address_T_64 | _legal_address_WIRE_6; // @[Parameters.scala:612:40]
wire _legal_address_T_66 = _legal_address_T_65 | _legal_address_WIRE_7; // @[Parameters.scala:612:40]
wire _legal_address_T_67 = _legal_address_T_66 | _legal_address_WIRE_8; // @[Parameters.scala:612:40]
wire _legal_address_T_68 = _legal_address_T_67 | _legal_address_WIRE_9; // @[Parameters.scala:612:40]
wire _legal_address_T_69 = _legal_address_T_68 | _legal_address_WIRE_10; // @[Parameters.scala:612:40]
wire legal_address = _legal_address_T_69 | _legal_address_WIRE_11; // @[Parameters.scala:612:40]
assign _io_resp_r_T_5 = legal_address; // @[PMA.scala:36:58, :39:19]
wire [40:0] _io_resp_cacheable_T_1 = {1'h0, _io_resp_cacheable_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_cacheable_T_2 = _io_resp_cacheable_T_1 & 41'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_cacheable_T_3 = _io_resp_cacheable_T_2; // @[Parameters.scala:137:46]
wire _io_resp_cacheable_T_4 = _io_resp_cacheable_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_cacheable_T_6 = {1'h0, _io_resp_cacheable_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_cacheable_T_7 = _io_resp_cacheable_T_6 & 41'h8C011000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_cacheable_T_8 = _io_resp_cacheable_T_7; // @[Parameters.scala:137:46]
wire _io_resp_cacheable_T_9 = _io_resp_cacheable_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_cacheable_T_11 = {1'h0, _io_resp_cacheable_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_cacheable_T_12 = _io_resp_cacheable_T_11 & 41'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_cacheable_T_13 = _io_resp_cacheable_T_12; // @[Parameters.scala:137:46]
wire _io_resp_cacheable_T_14 = _io_resp_cacheable_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_cacheable_T_15 = _io_resp_cacheable_T_4 | _io_resp_cacheable_T_9; // @[Parameters.scala:629:89]
wire _io_resp_cacheable_T_16 = _io_resp_cacheable_T_15 | _io_resp_cacheable_T_14; // @[Parameters.scala:629:89]
wire [40:0] _io_resp_cacheable_T_18 = {1'h0, _io_resp_cacheable_T_17}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_cacheable_T_19 = _io_resp_cacheable_T_18 & 41'h8C010000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_cacheable_T_20 = _io_resp_cacheable_T_19; // @[Parameters.scala:137:46]
wire _io_resp_cacheable_T_21 = _io_resp_cacheable_T_20 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_cacheable_T_23 = {1'h0, _io_resp_cacheable_T_22}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_cacheable_T_24 = _io_resp_cacheable_T_23 & 41'h80000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_cacheable_T_25 = _io_resp_cacheable_T_24; // @[Parameters.scala:137:46]
wire _io_resp_cacheable_T_26 = _io_resp_cacheable_T_25 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_cacheable_T_27 = _io_resp_cacheable_T_21 | _io_resp_cacheable_T_26; // @[Parameters.scala:629:89]
wire _io_resp_cacheable_T_29 = _io_resp_cacheable_T_27; // @[Mux.scala:30:73]
wire _io_resp_cacheable_T_30 = _io_resp_cacheable_T_29; // @[Mux.scala:30:73]
wire _io_resp_cacheable_WIRE = _io_resp_cacheable_T_30; // @[Mux.scala:30:73]
assign _io_resp_cacheable_T_31 = legal_address & _io_resp_cacheable_WIRE; // @[Mux.scala:30:73]
assign io_resp_cacheable_0 = _io_resp_cacheable_T_31; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_r_T_1 = {1'h0, _io_resp_r_T}; // @[Parameters.scala:137:{31,41}]
assign io_resp_r_0 = _io_resp_r_T_5; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_w_T_1 = {1'h0, _io_resp_w_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_2 = _io_resp_w_T_1 & 41'h98110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_3 = _io_resp_w_T_2; // @[Parameters.scala:137:46]
wire _io_resp_w_T_4 = _io_resp_w_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_w_T_6 = {1'h0, _io_resp_w_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_7 = _io_resp_w_T_6 & 41'h9A101000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_8 = _io_resp_w_T_7; // @[Parameters.scala:137:46]
wire _io_resp_w_T_9 = _io_resp_w_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_w_T_11 = {1'h0, _io_resp_w_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_12 = _io_resp_w_T_11 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_13 = _io_resp_w_T_12; // @[Parameters.scala:137:46]
wire _io_resp_w_T_14 = _io_resp_w_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_w_T_16 = {1'h0, _io_resp_w_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_17 = _io_resp_w_T_16 & 41'h98000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_18 = _io_resp_w_T_17; // @[Parameters.scala:137:46]
wire _io_resp_w_T_19 = _io_resp_w_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_w_T_21 = {1'h0, _io_resp_w_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_22 = _io_resp_w_T_21 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_23 = _io_resp_w_T_22; // @[Parameters.scala:137:46]
wire _io_resp_w_T_24 = _io_resp_w_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_8 = {io_paddr_0[39:29], io_paddr_0[28:0] ^ 29'h10000000}; // @[PMA.scala:18:7]
wire [39:0] _io_resp_w_T_25; // @[Parameters.scala:137:31]
assign _io_resp_w_T_25 = _GEN_8; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_pp_T_25; // @[Parameters.scala:137:31]
assign _io_resp_pp_T_25 = _GEN_8; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_al_T_25; // @[Parameters.scala:137:31]
assign _io_resp_al_T_25 = _GEN_8; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_aa_T_25; // @[Parameters.scala:137:31]
assign _io_resp_aa_T_25 = _GEN_8; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_x_T_54; // @[Parameters.scala:137:31]
assign _io_resp_x_T_54 = _GEN_8; // @[Parameters.scala:137:31]
wire [39:0] _io_resp_eff_T_25; // @[Parameters.scala:137:31]
assign _io_resp_eff_T_25 = _GEN_8; // @[Parameters.scala:137:31]
wire [40:0] _io_resp_w_T_26 = {1'h0, _io_resp_w_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_27 = _io_resp_w_T_26 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_28 = _io_resp_w_T_27; // @[Parameters.scala:137:46]
wire _io_resp_w_T_29 = _io_resp_w_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_w_T_31 = {1'h0, _io_resp_w_T_30}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_32 = _io_resp_w_T_31 & 41'h90000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_33 = _io_resp_w_T_32; // @[Parameters.scala:137:46]
wire _io_resp_w_T_34 = _io_resp_w_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_w_T_35 = _io_resp_w_T_4 | _io_resp_w_T_9; // @[Parameters.scala:629:89]
wire _io_resp_w_T_36 = _io_resp_w_T_35 | _io_resp_w_T_14; // @[Parameters.scala:629:89]
wire _io_resp_w_T_37 = _io_resp_w_T_36 | _io_resp_w_T_19; // @[Parameters.scala:629:89]
wire _io_resp_w_T_38 = _io_resp_w_T_37 | _io_resp_w_T_24; // @[Parameters.scala:629:89]
wire _io_resp_w_T_39 = _io_resp_w_T_38 | _io_resp_w_T_29; // @[Parameters.scala:629:89]
wire _io_resp_w_T_40 = _io_resp_w_T_39 | _io_resp_w_T_34; // @[Parameters.scala:629:89]
wire _io_resp_w_T_46 = _io_resp_w_T_40; // @[Mux.scala:30:73]
wire [40:0] _io_resp_w_T_42 = {1'h0, _io_resp_w_T_41}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_w_T_43 = _io_resp_w_T_42 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_w_T_44 = _io_resp_w_T_43; // @[Parameters.scala:137:46]
wire _io_resp_w_T_45 = _io_resp_w_T_44 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_w_T_48 = _io_resp_w_T_46; // @[Mux.scala:30:73]
wire _io_resp_w_WIRE = _io_resp_w_T_48; // @[Mux.scala:30:73]
assign _io_resp_w_T_49 = legal_address & _io_resp_w_WIRE; // @[Mux.scala:30:73]
assign io_resp_w_0 = _io_resp_w_T_49; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_pp_T_1 = {1'h0, _io_resp_pp_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_2 = _io_resp_pp_T_1 & 41'h98110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_3 = _io_resp_pp_T_2; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_4 = _io_resp_pp_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_pp_T_6 = {1'h0, _io_resp_pp_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_7 = _io_resp_pp_T_6 & 41'h9A101000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_8 = _io_resp_pp_T_7; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_9 = _io_resp_pp_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_pp_T_11 = {1'h0, _io_resp_pp_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_12 = _io_resp_pp_T_11 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_13 = _io_resp_pp_T_12; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_14 = _io_resp_pp_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_pp_T_16 = {1'h0, _io_resp_pp_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_17 = _io_resp_pp_T_16 & 41'h98000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_18 = _io_resp_pp_T_17; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_19 = _io_resp_pp_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_pp_T_21 = {1'h0, _io_resp_pp_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_22 = _io_resp_pp_T_21 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_23 = _io_resp_pp_T_22; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_24 = _io_resp_pp_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_pp_T_26 = {1'h0, _io_resp_pp_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_27 = _io_resp_pp_T_26 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_28 = _io_resp_pp_T_27; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_29 = _io_resp_pp_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_pp_T_31 = {1'h0, _io_resp_pp_T_30}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_32 = _io_resp_pp_T_31 & 41'h90000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_33 = _io_resp_pp_T_32; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_34 = _io_resp_pp_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_pp_T_35 = _io_resp_pp_T_4 | _io_resp_pp_T_9; // @[Parameters.scala:629:89]
wire _io_resp_pp_T_36 = _io_resp_pp_T_35 | _io_resp_pp_T_14; // @[Parameters.scala:629:89]
wire _io_resp_pp_T_37 = _io_resp_pp_T_36 | _io_resp_pp_T_19; // @[Parameters.scala:629:89]
wire _io_resp_pp_T_38 = _io_resp_pp_T_37 | _io_resp_pp_T_24; // @[Parameters.scala:629:89]
wire _io_resp_pp_T_39 = _io_resp_pp_T_38 | _io_resp_pp_T_29; // @[Parameters.scala:629:89]
wire _io_resp_pp_T_40 = _io_resp_pp_T_39 | _io_resp_pp_T_34; // @[Parameters.scala:629:89]
wire _io_resp_pp_T_46 = _io_resp_pp_T_40; // @[Mux.scala:30:73]
wire [40:0] _io_resp_pp_T_42 = {1'h0, _io_resp_pp_T_41}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_pp_T_43 = _io_resp_pp_T_42 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_pp_T_44 = _io_resp_pp_T_43; // @[Parameters.scala:137:46]
wire _io_resp_pp_T_45 = _io_resp_pp_T_44 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_pp_T_48 = _io_resp_pp_T_46; // @[Mux.scala:30:73]
wire _io_resp_pp_WIRE = _io_resp_pp_T_48; // @[Mux.scala:30:73]
assign _io_resp_pp_T_49 = legal_address & _io_resp_pp_WIRE; // @[Mux.scala:30:73]
assign io_resp_pp_0 = _io_resp_pp_T_49; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_al_T_1 = {1'h0, _io_resp_al_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_2 = _io_resp_al_T_1 & 41'h98110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_3 = _io_resp_al_T_2; // @[Parameters.scala:137:46]
wire _io_resp_al_T_4 = _io_resp_al_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_al_T_6 = {1'h0, _io_resp_al_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_7 = _io_resp_al_T_6 & 41'h9A101000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_8 = _io_resp_al_T_7; // @[Parameters.scala:137:46]
wire _io_resp_al_T_9 = _io_resp_al_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_al_T_11 = {1'h0, _io_resp_al_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_12 = _io_resp_al_T_11 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_13 = _io_resp_al_T_12; // @[Parameters.scala:137:46]
wire _io_resp_al_T_14 = _io_resp_al_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_al_T_16 = {1'h0, _io_resp_al_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_17 = _io_resp_al_T_16 & 41'h98000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_18 = _io_resp_al_T_17; // @[Parameters.scala:137:46]
wire _io_resp_al_T_19 = _io_resp_al_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_al_T_21 = {1'h0, _io_resp_al_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_22 = _io_resp_al_T_21 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_23 = _io_resp_al_T_22; // @[Parameters.scala:137:46]
wire _io_resp_al_T_24 = _io_resp_al_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_al_T_26 = {1'h0, _io_resp_al_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_27 = _io_resp_al_T_26 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_28 = _io_resp_al_T_27; // @[Parameters.scala:137:46]
wire _io_resp_al_T_29 = _io_resp_al_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_al_T_31 = {1'h0, _io_resp_al_T_30}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_32 = _io_resp_al_T_31 & 41'h90000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_33 = _io_resp_al_T_32; // @[Parameters.scala:137:46]
wire _io_resp_al_T_34 = _io_resp_al_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_al_T_35 = _io_resp_al_T_4 | _io_resp_al_T_9; // @[Parameters.scala:629:89]
wire _io_resp_al_T_36 = _io_resp_al_T_35 | _io_resp_al_T_14; // @[Parameters.scala:629:89]
wire _io_resp_al_T_37 = _io_resp_al_T_36 | _io_resp_al_T_19; // @[Parameters.scala:629:89]
wire _io_resp_al_T_38 = _io_resp_al_T_37 | _io_resp_al_T_24; // @[Parameters.scala:629:89]
wire _io_resp_al_T_39 = _io_resp_al_T_38 | _io_resp_al_T_29; // @[Parameters.scala:629:89]
wire _io_resp_al_T_40 = _io_resp_al_T_39 | _io_resp_al_T_34; // @[Parameters.scala:629:89]
wire _io_resp_al_T_46 = _io_resp_al_T_40; // @[Mux.scala:30:73]
wire [40:0] _io_resp_al_T_42 = {1'h0, _io_resp_al_T_41}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_al_T_43 = _io_resp_al_T_42 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_al_T_44 = _io_resp_al_T_43; // @[Parameters.scala:137:46]
wire _io_resp_al_T_45 = _io_resp_al_T_44 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_al_T_48 = _io_resp_al_T_46; // @[Mux.scala:30:73]
wire _io_resp_al_WIRE = _io_resp_al_T_48; // @[Mux.scala:30:73]
assign _io_resp_al_T_49 = legal_address & _io_resp_al_WIRE; // @[Mux.scala:30:73]
assign io_resp_al_0 = _io_resp_al_T_49; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_aa_T_1 = {1'h0, _io_resp_aa_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_2 = _io_resp_aa_T_1 & 41'h98110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_3 = _io_resp_aa_T_2; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_4 = _io_resp_aa_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_aa_T_6 = {1'h0, _io_resp_aa_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_7 = _io_resp_aa_T_6 & 41'h9A101000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_8 = _io_resp_aa_T_7; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_9 = _io_resp_aa_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_aa_T_11 = {1'h0, _io_resp_aa_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_12 = _io_resp_aa_T_11 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_13 = _io_resp_aa_T_12; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_14 = _io_resp_aa_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_aa_T_16 = {1'h0, _io_resp_aa_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_17 = _io_resp_aa_T_16 & 41'h98000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_18 = _io_resp_aa_T_17; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_19 = _io_resp_aa_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_aa_T_21 = {1'h0, _io_resp_aa_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_22 = _io_resp_aa_T_21 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_23 = _io_resp_aa_T_22; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_24 = _io_resp_aa_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_aa_T_26 = {1'h0, _io_resp_aa_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_27 = _io_resp_aa_T_26 & 41'h9A111000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_28 = _io_resp_aa_T_27; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_29 = _io_resp_aa_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_aa_T_31 = {1'h0, _io_resp_aa_T_30}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_32 = _io_resp_aa_T_31 & 41'h90000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_33 = _io_resp_aa_T_32; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_34 = _io_resp_aa_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_aa_T_35 = _io_resp_aa_T_4 | _io_resp_aa_T_9; // @[Parameters.scala:629:89]
wire _io_resp_aa_T_36 = _io_resp_aa_T_35 | _io_resp_aa_T_14; // @[Parameters.scala:629:89]
wire _io_resp_aa_T_37 = _io_resp_aa_T_36 | _io_resp_aa_T_19; // @[Parameters.scala:629:89]
wire _io_resp_aa_T_38 = _io_resp_aa_T_37 | _io_resp_aa_T_24; // @[Parameters.scala:629:89]
wire _io_resp_aa_T_39 = _io_resp_aa_T_38 | _io_resp_aa_T_29; // @[Parameters.scala:629:89]
wire _io_resp_aa_T_40 = _io_resp_aa_T_39 | _io_resp_aa_T_34; // @[Parameters.scala:629:89]
wire _io_resp_aa_T_46 = _io_resp_aa_T_40; // @[Mux.scala:30:73]
wire [40:0] _io_resp_aa_T_42 = {1'h0, _io_resp_aa_T_41}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_aa_T_43 = _io_resp_aa_T_42 & 41'h9A110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_aa_T_44 = _io_resp_aa_T_43; // @[Parameters.scala:137:46]
wire _io_resp_aa_T_45 = _io_resp_aa_T_44 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_aa_T_48 = _io_resp_aa_T_46; // @[Mux.scala:30:73]
wire _io_resp_aa_WIRE = _io_resp_aa_T_48; // @[Mux.scala:30:73]
assign _io_resp_aa_T_49 = legal_address & _io_resp_aa_WIRE; // @[Mux.scala:30:73]
assign io_resp_aa_0 = _io_resp_aa_T_49; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_x_T_1 = {1'h0, _io_resp_x_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_2 = _io_resp_x_T_1 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_3 = _io_resp_x_T_2; // @[Parameters.scala:137:46]
wire _io_resp_x_T_4 = _io_resp_x_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_6 = {1'h0, _io_resp_x_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_7 = _io_resp_x_T_6 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_8 = _io_resp_x_T_7; // @[Parameters.scala:137:46]
wire _io_resp_x_T_9 = _io_resp_x_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_11 = {1'h0, _io_resp_x_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_12 = _io_resp_x_T_11 & 41'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_13 = _io_resp_x_T_12; // @[Parameters.scala:137:46]
wire _io_resp_x_T_14 = _io_resp_x_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_16 = {1'h0, _io_resp_x_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_17 = _io_resp_x_T_16 & 41'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_18 = _io_resp_x_T_17; // @[Parameters.scala:137:46]
wire _io_resp_x_T_19 = _io_resp_x_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_21 = {1'h0, _io_resp_x_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_22 = _io_resp_x_T_21 & 41'h90000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_23 = _io_resp_x_T_22; // @[Parameters.scala:137:46]
wire _io_resp_x_T_24 = _io_resp_x_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_x_T_25 = _io_resp_x_T_4 | _io_resp_x_T_9; // @[Parameters.scala:629:89]
wire _io_resp_x_T_26 = _io_resp_x_T_25 | _io_resp_x_T_14; // @[Parameters.scala:629:89]
wire _io_resp_x_T_27 = _io_resp_x_T_26 | _io_resp_x_T_19; // @[Parameters.scala:629:89]
wire _io_resp_x_T_28 = _io_resp_x_T_27 | _io_resp_x_T_24; // @[Parameters.scala:629:89]
wire _io_resp_x_T_64 = _io_resp_x_T_28; // @[Mux.scala:30:73]
wire [40:0] _io_resp_x_T_30 = {1'h0, _io_resp_x_T_29}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_31 = _io_resp_x_T_30 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_32 = _io_resp_x_T_31; // @[Parameters.scala:137:46]
wire _io_resp_x_T_33 = _io_resp_x_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_35 = {1'h0, _io_resp_x_T_34}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_36 = _io_resp_x_T_35 & 41'h9E103000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_37 = _io_resp_x_T_36; // @[Parameters.scala:137:46]
wire _io_resp_x_T_38 = _io_resp_x_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_40 = {1'h0, _io_resp_x_T_39}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_41 = _io_resp_x_T_40 & 41'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_42 = _io_resp_x_T_41; // @[Parameters.scala:137:46]
wire _io_resp_x_T_43 = _io_resp_x_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_45 = {1'h0, _io_resp_x_T_44}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_46 = _io_resp_x_T_45 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_47 = _io_resp_x_T_46; // @[Parameters.scala:137:46]
wire _io_resp_x_T_48 = _io_resp_x_T_47 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_50 = {1'h0, _io_resp_x_T_49}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_51 = _io_resp_x_T_50 & 41'h9C000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_52 = _io_resp_x_T_51; // @[Parameters.scala:137:46]
wire _io_resp_x_T_53 = _io_resp_x_T_52 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_x_T_55 = {1'h0, _io_resp_x_T_54}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_x_T_56 = _io_resp_x_T_55 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_x_T_57 = _io_resp_x_T_56; // @[Parameters.scala:137:46]
wire _io_resp_x_T_58 = _io_resp_x_T_57 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_x_T_59 = _io_resp_x_T_33 | _io_resp_x_T_38; // @[Parameters.scala:629:89]
wire _io_resp_x_T_60 = _io_resp_x_T_59 | _io_resp_x_T_43; // @[Parameters.scala:629:89]
wire _io_resp_x_T_61 = _io_resp_x_T_60 | _io_resp_x_T_48; // @[Parameters.scala:629:89]
wire _io_resp_x_T_62 = _io_resp_x_T_61 | _io_resp_x_T_53; // @[Parameters.scala:629:89]
wire _io_resp_x_T_63 = _io_resp_x_T_62 | _io_resp_x_T_58; // @[Parameters.scala:629:89]
wire _io_resp_x_T_66 = _io_resp_x_T_64; // @[Mux.scala:30:73]
wire _io_resp_x_WIRE = _io_resp_x_T_66; // @[Mux.scala:30:73]
assign _io_resp_x_T_67 = legal_address & _io_resp_x_WIRE; // @[Mux.scala:30:73]
assign io_resp_x_0 = _io_resp_x_T_67; // @[PMA.scala:18:7, :39:19]
wire [40:0] _io_resp_eff_T_1 = {1'h0, _io_resp_eff_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_2 = _io_resp_eff_T_1 & 41'h9E112000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_3 = _io_resp_eff_T_2; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_4 = _io_resp_eff_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_6 = {1'h0, _io_resp_eff_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_7 = _io_resp_eff_T_6 & 41'h9E103000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_8 = _io_resp_eff_T_7; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_9 = _io_resp_eff_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_11 = {1'h0, _io_resp_eff_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_12 = _io_resp_eff_T_11 & 41'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_13 = _io_resp_eff_T_12; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_14 = _io_resp_eff_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_16 = {1'h0, _io_resp_eff_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_17 = _io_resp_eff_T_16 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_18 = _io_resp_eff_T_17; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_19 = _io_resp_eff_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_21 = {1'h0, _io_resp_eff_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_22 = _io_resp_eff_T_21 & 41'h9C000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_23 = _io_resp_eff_T_22; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_24 = _io_resp_eff_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_26 = {1'h0, _io_resp_eff_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_27 = _io_resp_eff_T_26 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_28 = _io_resp_eff_T_27; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_29 = _io_resp_eff_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_eff_T_30 = _io_resp_eff_T_4 | _io_resp_eff_T_9; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_31 = _io_resp_eff_T_30 | _io_resp_eff_T_14; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_32 = _io_resp_eff_T_31 | _io_resp_eff_T_19; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_33 = _io_resp_eff_T_32 | _io_resp_eff_T_24; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_34 = _io_resp_eff_T_33 | _io_resp_eff_T_29; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_58 = _io_resp_eff_T_34; // @[Mux.scala:30:73]
wire [40:0] _io_resp_eff_T_36 = {1'h0, _io_resp_eff_T_35}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_37 = _io_resp_eff_T_36 & 41'h9E113000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_38 = _io_resp_eff_T_37; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_39 = _io_resp_eff_T_38 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_41 = {1'h0, _io_resp_eff_T_40}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_42 = _io_resp_eff_T_41 & 41'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_43 = _io_resp_eff_T_42; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_44 = _io_resp_eff_T_43 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_46 = {1'h0, _io_resp_eff_T_45}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_47 = _io_resp_eff_T_46 & 41'h9E110000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_48 = _io_resp_eff_T_47; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_49 = _io_resp_eff_T_48 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _io_resp_eff_T_51 = {1'h0, _io_resp_eff_T_50}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _io_resp_eff_T_52 = _io_resp_eff_T_51 & 41'h90000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _io_resp_eff_T_53 = _io_resp_eff_T_52; // @[Parameters.scala:137:46]
wire _io_resp_eff_T_54 = _io_resp_eff_T_53 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _io_resp_eff_T_55 = _io_resp_eff_T_39 | _io_resp_eff_T_44; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_56 = _io_resp_eff_T_55 | _io_resp_eff_T_49; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_57 = _io_resp_eff_T_56 | _io_resp_eff_T_54; // @[Parameters.scala:629:89]
wire _io_resp_eff_T_60 = _io_resp_eff_T_58; // @[Mux.scala:30:73]
wire _io_resp_eff_WIRE = _io_resp_eff_T_60; // @[Mux.scala:30:73]
assign _io_resp_eff_T_61 = legal_address & _io_resp_eff_WIRE; // @[Mux.scala:30:73]
assign io_resp_eff_0 = _io_resp_eff_T_61; // @[PMA.scala:18:7, :39:19]
assign io_resp_cacheable = io_resp_cacheable_0; // @[PMA.scala:18:7]
assign io_resp_r = io_resp_r_0; // @[PMA.scala:18:7]
assign io_resp_w = io_resp_w_0; // @[PMA.scala:18:7]
assign io_resp_pp = io_resp_pp_0; // @[PMA.scala:18:7]
assign io_resp_al = io_resp_al_0; // @[PMA.scala:18:7]
assign io_resp_aa = io_resp_aa_0; // @[PMA.scala:18:7]
assign io_resp_x = io_resp_x_0; // @[PMA.scala:18:7]
assign io_resp_eff = io_resp_eff_0; // @[PMA.scala:18:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
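// Note (illustrative): with the integer Arithmetic instances defined in Arithmetic.scala,
// the assignment above computes out_d = in_a * in_b + in_c, i.e. a single multiply-accumulate.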
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
 * @param inputType Datatype of the A operand
 * @param outputType Datatype of the B and D operands and of the C result
 * @param accType Datatype of the accumulator used by the output-stationary dataflow
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
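// The MAC's addend type is outputType in the weight-stationary dataflow (the partial sum
// arriving on in_b and forwarded on out_b) and accType otherwise (the local c1/c2
// accumulators used by the output-stationary dataflow); see the dataflow logic below.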
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
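// last_s holds the propagate flag sampled on the most recent valid cycle, so `flip` is high
// only when that flag has just toggled; the rounding shift is applied at that boundary and a
// shift of zero is used otherwise.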
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds to the nearest value (ties to even) rather than truncating
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
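// Worked example (illustrative values): self = 11 = b1011, u = 2 gives point_five = self(1) = 1,
// zeros = ((self & b0001) =/= 0) = 1 and ones_digit = self(2) = 0, so r = 1 and the result is
// (11 >> 2) + 1 = 3, i.e. 2.75 rounded to the nearest integer instead of truncated to 2.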
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
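// Example (illustrative): clipping to an 8-bit SInt target uses maxsat = 127 and minsat = -128,
// so an input of 300.S saturates to 127.S and -300.S saturates to -128.S.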
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating-point square-root unit, but we should use an integer one instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat square-root unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
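// Note: `identity` above assembles the IEEE encoding of +1.0 (sign 0, exponent equal to the
// bias, zero significand), while `minimum` assembles negative infinity (sign 1, all-ones
// exponent, zero significand).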
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_366( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid // @[PE.scala:35:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7]
wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60]
wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [7:0] c1; // @[PE.scala:70:15]
wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [7:0] c2; // @[PE.scala:71:15]
wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}]
wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16]
wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8]
c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15]
if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8]
c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15]
if (io_in_valid_0) // @[PE.scala:31:7]
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
  end // @[PE.scala:31:7]
MacUnit_110 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_b_0), // @[PE.scala:31:7]
.io_out_d (io_out_b_0)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
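// Note (assumption, added for illustration): in the unified-buffer branch above,
// each traversable virtual channel owns the slice [starts(i), ends(i)) of the
// shared Mem and keeps its own head/tail pointers as a ring. Because
// head == tail is interpreted as empty, the non-output-queue configuration
// reserves one extra slot per VC so a full ring can be told apart from an
// empty one.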
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_20( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [1:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_3_2, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_2, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_2, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_5_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_4_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_3_2, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_2, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_5_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_4_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_5_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_4_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_0, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_5_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_4_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [144:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [1:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [1:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [144:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [2:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [2:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire _GEN; // @[MixedVec.scala:116:9]
wire _GEN_0; // @[MixedVec.scala:116:9]
wire vcalloc_vals_2; // @[InputUnit.scala:266:25, :272:46, :273:29]
wire _GEN_1; // @[MixedVec.scala:116:9]
wire _GEN_2; // @[MixedVec.scala:116:9]
wire vcalloc_vals_1; // @[InputUnit.scala:266:25, :272:46, :273:29]
wire _GEN_3; // @[MixedVec.scala:116:9]
wire _GEN_4; // @[MixedVec.scala:116:9]
wire vcalloc_reqs_0_vc_sel_0_0; // @[MixedVec.scala:116:9]
wire vcalloc_vals_0; // @[InputUnit.scala:266:25, :272:46, :273:29]
wire _salloc_arb_io_in_0_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [2:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [1:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [144:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [144:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [144:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_0_g; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_5_0; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_4_0; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_0_0; // @[InputUnit.scala:192:19]
reg [1:0] states_0_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_0_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_0_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_0_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_0_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_1_g; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_5_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_4_0; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_2_g; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_5_0; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_4_0; // @[InputUnit.scala:192:19]
reg [1:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN_5 = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_0_valid = states_0_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
wire _GEN_6 = _route_arbiter_io_in_1_ready & route_arbiter_io_in_1_valid; // @[Decoupled.scala:51:35]
wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
wire _GEN_7 = _route_arbiter_io_in_2_ready & route_arbiter_io_in_2_valid; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
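// Usage sketch (assumption, not part of the original file): DecoupledHelper is
// normally used to drive each ready/valid signal from the conjunction of all
// *other* conditions, which is why fire(exclude) relies on referential equality.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
    val allow = Input(Bool())
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready, io.allow)
  io.in.ready := helper.fire(io.in.valid) // all conditions except in.valid
  io.out.valid := helper.fire(io.out.ready) // all conditions except out.ready
  io.out.bits := io.in.bits
}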
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
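// Usage sketch (assumption, not part of the original file): Majority is a
// simple voter -- true when more than half of the inputs are high. For three
// inputs it reduces to (a && b) || (a && c) || (b && c).
class MajorityExample extends Module {
  val io = IO(new Bundle {
    val in = Input(Vec(3, Bool()))
    val out = Output(Bool())
  })
  io.out := Majority(io.in.toSeq)
}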
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
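// Usage sketch (assumption, not part of the original file): elaborating MaskGen
// for a 4-byte beat reproduces the examples in the comment above, e.g.
// (addr_lo=0x3, lgSize=0) -> 0001 and (addr_lo=0x0, lgSize=2) -> 1111.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(2.W))
    val lgSize = Input(UInt(2.W))
    val mask = Output(UInt(4.W))
  })
  io.mask := MaskGen(io.addr_lo, io.lgSize, beatBytes = 4)
}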
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
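  // Worked example (assumption, illustration only): UIntToOH1(2.U, 4) produces
  // "b0011" (a mask of x low-order ones), and OH1ToOH("b0011".U) recovers the
  // one-hot "b0100".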
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
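  // Worked examples (assumption, illustration only), for a 5-bit value "b00100":
  // leftOR gives "b11100" (set bits smeared toward the MSB) and rightOR gives
  // "b00111" (set bits smeared toward the LSB).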
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Metadata.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.util._
object ClientStates {
val width = 2
def Nothing = 0.U(width.W)
def Branch = 1.U(width.W)
def Trunk = 2.U(width.W)
def Dirty = 3.U(width.W)
def hasReadPermission(state: UInt): Bool = state > Nothing
def hasWritePermission(state: UInt): Bool = state > Branch
}
object MemoryOpCategories extends MemoryOpConstants {
def wr = Cat(true.B, true.B) // Op actually writes
def wi = Cat(false.B, true.B) // Future op will write
def rd = Cat(false.B, false.B) // Op only reads
def categorize(cmd: UInt): UInt = {
val cat = Cat(isWrite(cmd), isWriteIntent(cmd))
//assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.")
cat
}
}
/** Stores the client-side coherence information,
* such as permissions on the data and whether the data is dirty.
* Its API can be used to make TileLink messages in response to
  * memory operations, cache control operations, or Probe messages.
*/
class ClientMetadata extends Bundle {
/** Actual state information stored in this bundle */
val state = UInt(ClientStates.width.W)
/** Metadata equality */
def ===(rhs: UInt): Bool = state === rhs
def ===(rhs: ClientMetadata): Bool = state === rhs.state
def =/=(rhs: ClientMetadata): Bool = !this.===(rhs)
/** Is the block's data present in this cache */
def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing
/** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */
private def growStarter(cmd: UInt): (Bool, UInt) = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
MuxTLookup(Cat(c, state), (false.B, 0.U), Seq(
//(effect, am now) -> (was a hit, next)
Cat(rd, Dirty) -> (true.B, Dirty),
Cat(rd, Trunk) -> (true.B, Trunk),
Cat(rd, Branch) -> (true.B, Branch),
Cat(wi, Dirty) -> (true.B, Dirty),
Cat(wi, Trunk) -> (true.B, Trunk),
Cat(wr, Dirty) -> (true.B, Dirty),
Cat(wr, Trunk) -> (true.B, Dirty),
//(effect, am now) -> (was a miss, param)
Cat(rd, Nothing) -> (false.B, NtoB),
Cat(wi, Branch) -> (false.B, BtoT),
Cat(wi, Nothing) -> (false.B, NtoT),
Cat(wr, Branch) -> (false.B, BtoT),
Cat(wr, Nothing) -> (false.B, NtoT)))
}
/** Determine what state to go to after miss based on Grant param
* For now, doesn't depend on state (which may have been Probed).
*/
private def growFinisher(cmd: UInt, param: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
//assert(c === rd || param === toT, "Client was expecting trunk permissions.")
MuxLookup(Cat(c, param), Nothing)(Seq(
//(effect param) -> (next)
Cat(rd, toB) -> Branch,
Cat(rd, toT) -> Trunk,
Cat(wi, toT) -> Trunk,
Cat(wr, toT) -> Dirty))
}
/** Does this cache have permissions on this block sufficient to perform op,
* and what to do next (Acquire message param or updated metadata). */
def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = growStarter(cmd)
(r._1, r._2, ClientMetadata(r._2))
}
/** Does a secondary miss on the block require another Acquire message */
def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = {
import MemoryOpCategories._
val r1 = growStarter(first_cmd)
val r2 = growStarter(second_cmd)
val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd)
val hit_again = r1._1 && r2._1
val dirties = categorize(second_cmd) === wr
val biggest_grow_param = Mux(dirties, r2._2, r1._2)
val dirtiest_state = ClientMetadata(biggest_grow_param)
val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd)
(needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd)
}
/** Metadata change on a returned Grant */
def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param))
/** Determine what state to go to based on Probe param */
private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = {
import ClientStates._
import TLPermissions._
MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq(
//(wanted, am now) -> (hasDirtyData resp, next)
Cat(toT, Dirty) -> (true.B, TtoT, Trunk),
Cat(toT, Trunk) -> (false.B, TtoT, Trunk),
Cat(toT, Branch) -> (false.B, BtoB, Branch),
Cat(toT, Nothing) -> (false.B, NtoN, Nothing),
Cat(toB, Dirty) -> (true.B, TtoB, Branch),
Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade
Cat(toB, Branch) -> (false.B, BtoB, Branch),
Cat(toB, Nothing) -> (false.B, NtoN, Nothing),
Cat(toN, Dirty) -> (true.B, TtoN, Nothing),
Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Nothing) -> (false.B, NtoN, Nothing)))
}
/** Translate cache control cmds into Probe param */
private def cmdToPermCap(cmd: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
MuxLookup(cmd, toN)(Seq(
M_FLUSH -> toN,
M_PRODUCE -> toB,
M_CLEAN -> toT))
}
def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(cmdToPermCap(cmd))
(r._1, r._2, ClientMetadata(r._3))
}
def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(param)
(r._1, r._2, ClientMetadata(r._3))
}
}
/** Factories for ClientMetadata, including on reset */
object ClientMetadata {
def apply(perm: UInt) = {
val meta = Wire(new ClientMetadata)
meta.state := perm
meta
}
def onReset = ClientMetadata(ClientStates.Nothing)
def maximum = ClientMetadata(ClientStates.Dirty)
}
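// Usage sketch (assumption, not part of the original file): with only Branch
// (shared, read-only) permission a load hits, while a store misses and
// onAccess returns the BtoT grow param to put in the Acquire message.
class ClientMetadataExample extends Module {
  val io = IO(new Bundle {
    val cmd = Input(UInt(5.W)) // a rocket memory command such as M_XRD / M_XWR
    val hit = Output(Bool())
    val acquire_param = Output(UInt(3.W))
  })
  val meta = ClientMetadata(ClientStates.Branch)
  val (is_hit, param, next_meta) = meta.onAccess(io.cmd)
  io.hit := is_hit
  io.acquire_param := param
}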
File Replacement.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import freechips.rocketchip.util.property.cover
abstract class ReplacementPolicy {
def nBits: Int
def perSet: Boolean
def way: UInt
def miss: Unit
def hit: Unit
def access(touch_way: UInt): Unit
def access(touch_ways: Seq[Valid[UInt]]): Unit
def state_read: UInt
def get_next_state(state: UInt, touch_way: UInt): UInt
def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = {
touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev))
}
def get_replace_way(state: UInt): UInt
}
object ReplacementPolicy {
def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match {
case "random" => new RandomReplacement(n_ways)
case "lru" => new TrueLRU(n_ways)
case "plru" => new PseudoLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
}
class RandomReplacement(n_ways: Int) extends ReplacementPolicy {
private val replace = Wire(Bool())
replace := false.B
def nBits = 16
def perSet = false
private val lfsr = LFSR(nBits, replace)
def state_read = WireDefault(lfsr)
def way = Random(n_ways, lfsr)
def miss = replace := true.B
def hit = {}
def access(touch_way: UInt) = {}
def access(touch_ways: Seq[Valid[UInt]]) = {}
def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare
def get_replace_way(state: UInt) = way
}
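// A minimal sketch of the ReplacementPolicy protocol (hypothetical module; the
// policy string follows ReplacementPolicy.fromString): expose the current victim,
// record a touch on hits, and advance the policy state on misses.
class VictimSelectExample(policy: String, nWays: Int) extends Module {
  require(nWays > 1)
  val io = IO(new Bundle {
    val req_valid = Input(Bool())
    val hit       = Input(Bool())
    val hit_way   = Input(UInt(log2Ceil(nWays).W))
    val victim    = Output(UInt(log2Ceil(nWays).W))
  })
  val repl = ReplacementPolicy.fromString(policy, nWays)
  io.victim := repl.way
  when (io.req_valid) {
    when (io.hit) {
      repl.access(io.hit_way)  // updates recency for (P)LRU; no-op for random
    } .otherwise {
      repl.miss                // e.g. advances the LFSR for RandomReplacement
    }
  }
}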
abstract class SeqReplacementPolicy {
def access(set: UInt): Unit
def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit
def way: UInt
}
abstract class SetAssocReplacementPolicy {
def access(set: UInt, touch_way: UInt): Unit
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit
def way(set: UInt): UInt
}
class SeqRandom(n_ways: Int) extends SeqReplacementPolicy {
val logic = new RandomReplacement(n_ways)
def access(set: UInt) = { }
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
when (valid && !hit) { logic.miss }
}
def way = logic.way
}
class TrueLRU(n_ways: Int) extends ReplacementPolicy {
  // True LRU replacement policy, using a triangular matrix to track which ways are more recently used than others.
// The matrix is packed into a single UInt (or Bits). Example 4-way (6-bits):
// [5] - 3 more recent than 2
// [4] - 3 more recent than 1
// [3] - 2 more recent than 1
// [2] - 3 more recent than 0
// [1] - 2 more recent than 0
// [0] - 1 more recent than 0
def nBits = (n_ways * (n_ways-1)) / 2
def perSet = true
private val state_reg = RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
private def extractMRUVec(state: UInt): Seq[UInt] = {
// Extract per-way information about which higher-indexed ways are more recently used
val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W)))
var lsb = 0
for (i <- 0 until n_ways-1) {
moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W))
lsb = lsb + (n_ways - i - 1)
}
moreRecentVec
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W)))
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
val wayDec = UIntToOH(touch_way, n_ways)
// Compute next value of triangular matrix
// set the touched way as more recent than every other way
nextState.zipWithIndex.map { case (e, i) =>
e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec)
}
nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1
}
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous")
}
}
def get_replace_way(state: UInt): UInt = {
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
// For each way, determine if all other ways are more recent
val mruWayDec = (0 until n_ways).map { i =>
val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR)
val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _))
upperMoreRecent && lowerMoreRecent
}
OHToUInt(mruWayDec)
}
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
@deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05")
def replace: UInt = way
}
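// A worked 2-way sketch (hypothetical module; the policy's internal state_reg is
// unused here): with two ways the matrix collapses to a single bit meaning
// "way 1 is more recent than way 0", so touching way 1 from the all-zeros state
// makes way 0 the next victim.
class TrueLRU2WayExample extends Module {
  val io = IO(new Bundle { val victim = Output(UInt(1.W)) })
  val lru = new TrueLRU(2)
  val s1  = lru.get_next_state(0.U(lru.nBits.W), touch_way = 1.U(1.W)) // sets the bit
  io.victim := lru.get_replace_way(s1)                                 // expect 0.U
}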
class PseudoLRU(n_ways: Int) extends ReplacementPolicy {
// Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU
//
//
// - bits storage example for 4-way PLRU binary tree:
// bit[2]: ways 3+2 older than ways 1+0
// / \
// bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0
//
//
// - bits storage example for 3-way PLRU binary tree:
// bit[1]: way 2 older than ways 1+0
// \
// bit[0]: way 1 older than way 0
//
//
// - bits storage example for 8-way PLRU binary tree:
// bit[6]: ways 7-4 older than ways 3-0
// / \
// bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0
// / \ / \
// bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0
def nBits = n_ways - 1
def perSet = true
private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i simultaneous")
}
}
/** @param state state_reg bits for this sub-tree
* @param touch_way touched way encoded value bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways")
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val set_left_older = !touch_way(log2Ceil(tree_nways)-1)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(set_left_older,
Mux(set_left_older,
left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree
get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(set_left_older,
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value
!touch_way(0)
} else { // tree_nways <= 1
// we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires)
0.U(1.W)
}
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways))
else touch_way.extract(log2Ceil(n_ways)-1,0)
get_next_state(state, touch_way_sized, n_ways)
}
/** @param state state_reg bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_replace_way(state: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
// this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val left_subtree_older = state(tree_nways-2)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right
get_replace_way(left_subtree_state, left_nways), // recurse left
get_replace_way(right_subtree_state, right_nways))) // recurse right
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right
0.U(1.W),
get_replace_way(right_subtree_state, right_nways))) // recurse right
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value
state(0)
} else { // tree_nways <= 1
// we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value
0.U(1.W)
}
}
def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways)
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
}
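// A worked 3-way sketch (hypothetical module; the expected values match the
// PLRUTest vectors further below): from state 0, touching way 0 marks it newest
// at both tree levels, giving state 3, whose replacement victim is way 2.
class PLRU3WayExample extends Module {
  val io = IO(new Bundle { val victim = Output(UInt(2.W)) })
  val plru = new PseudoLRU(3)
  val s1 = plru.get_next_state(0.U(plru.nBits.W), 0.U(2.W)) // expect 3.U
  io.victim := plru.get_replace_way(s1)                     // expect 2.U
}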
class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy {
val logic = new PseudoLRU(n_ways)
val state = SyncReadMem(n_sets, UInt(logic.nBits.W))
val current_state = Wire(UInt(logic.nBits.W))
val next_state = Wire(UInt(logic.nBits.W))
val plru_way = logic.get_replace_way(current_state)
def access(set: UInt) = {
current_state := state.read(set)
}
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
val update_way = Mux(hit, way, plru_way)
next_state := logic.get_next_state(current_state, update_way)
when (valid) { state.write(set, next_state) }
}
def way = plru_way
}
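// A minimal sketch of the SeqReplacementPolicy handshake (hypothetical module and
// stage names): access() presents the SyncReadMem read address in one cycle, and
// way/update() are used in the following cycle once current_state is valid,
// mirroring a cache's tag-read/response pipeline.
class SeqPLRUUserExample(nSets: Int, nWays: Int) extends Module {
  require(nSets > 1 && nWays > 1)
  val io = IO(new Bundle {
    val s0_valid  = Input(Bool())                      // tag-read stage
    val s0_set    = Input(UInt(log2Ceil(nSets).W))
    val s1_valid  = Input(Bool())                      // response stage
    val s1_hit    = Input(Bool())
    val s1_way    = Input(UInt(log2Ceil(nWays).W))     // hit way (ignored on a miss)
    val s1_victim = Output(UInt(log2Ceil(nWays).W))
  })
  val repl   = new SeqPLRU(nSets, nWays)
  val s1_set = RegNext(io.s0_set)
  repl.access(io.s0_set)                               // read state for next cycle
  repl.update(io.s1_valid, io.s1_hit, s1_set, io.s1_way)
  io.s1_victim := repl.way                             // victim for the s1 access
}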
class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy {
val logic = policy.toLowerCase match {
case "plru" => new PseudoLRU(n_ways)
case "lru" => new TrueLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
val state_vec =
if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line
else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W))))
def access(set: UInt, touch_way: UInt) = {
state_vec(set) := logic.get_next_state(state_vec(set), touch_way)
}
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = {
require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways")
for (set <- 0 until n_sets) {
val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) =>
Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)}
when (set_touch_ways.map(_.valid).orR) {
state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways)
}
}
}
def way(set: UInt) = logic.get_replace_way(state_vec(set))
}
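// A minimal sketch for the flop-based, per-set variant (hypothetical module):
// SetAssocLRU keeps one state register per set, so the victim for a set is
// available combinationally in the same cycle as the touch.
class SetAssocLRUExample(nSets: Int, nWays: Int) extends Module {
  require(nSets > 1 && nWays > 1)
  val io = IO(new Bundle {
    val set    = Input(UInt(log2Ceil(nSets).W))
    val touch  = Flipped(Valid(UInt(log2Ceil(nWays).W)))
    val victim = Output(UInt(log2Ceil(nWays).W))
  })
  val repl = new SetAssocLRU(nSets, nWays, "plru")
  io.victim := repl.way(io.set)
  when (io.touch.valid) { repl.access(io.set, io.touch.bits) }
}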
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) {
val plru = new PseudoLRU(n_ways)
// step
io.finished := RegNext(true.B, false.B)
val get_replace_ways = (0 until (1 << (n_ways-1))).map(state =>
plru.get_replace_way(state = state.U((n_ways-1).W)))
val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way =>
plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W))))
n_ways match {
case 2 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1))
assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1))
}
case 3 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3))
assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2))
assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2))
assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2))
assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2))
}
case 4 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3))
assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4))
assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5))
assert(get_replace_ways(6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6))
assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7))
assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2))
assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3))
assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2))
assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3))
assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2))
assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3))
assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2))
assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3))
assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0))
assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1))
assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2))
assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3))
assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0))
assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1))
assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2))
assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3))
assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0))
assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 way=1: expected=6 actual=%d", get_next_states(6)(1))
assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2))
assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3))
assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0))
      assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=1: expected=6 actual=%d", get_next_states(7)(1))
assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2))
assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3))
}
case 5 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15))
assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0))
assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1))
assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2))
assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3))
assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: expected=00 actual=%d", get_next_states( 0)(4))
assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0))
assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1))
assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2))
assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3))
assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4))
assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0))
assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1))
assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2))
assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3))
assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4))
assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0))
assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1))
assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2))
assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3))
assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4))
assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0))
assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1))
assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2))
assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3))
assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4))
assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0))
assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1))
assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2))
assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3))
assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4))
assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0))
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1))
assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2))
assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3))
assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4))
assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0))
      assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=1: expected=14 actual=%d", get_next_states( 7)(1))
assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2))
assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3))
assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4))
assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0))
assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1))
assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2))
assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3))
assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4))
assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0))
assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1))
assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2))
assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3))
assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4))
assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0))
assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1))
assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2))
assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3))
assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4))
assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0))
assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1))
assert(get_next_states(11)(2) === 11.U(plru.nBits.W), s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2))
assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3))
assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4))
assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0))
assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1))
assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2))
assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3))
assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4))
assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0))
assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1))
assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2))
assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3))
assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4))
assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0))
assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1))
assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2))
assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3))
assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4))
assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0))
      assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=1: expected=14 actual=%d", get_next_states(15)(1))
assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2))
assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3))
assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4))
}
case 6 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15))
assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16))
assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17))
assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18))
assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19))
assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20))
assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21))
assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22))
assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23))
assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24))
assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25))
assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26))
assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27))
assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28))
assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29))
assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30))
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31))
}
case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways")
}
}
File HellaCache.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3.{dontTouch, _}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.AMBAProtField
import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType}
import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters}
import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata}
import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle}
import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt}
import scala.collection.mutable.ListBuffer
case class DCacheParams(
nSets: Int = 64,
nWays: Int = 4,
rowBits: Int = 64,
subWordBits: Option[Int] = None,
replacementPolicy: String = "random",
nTLBSets: Int = 1,
nTLBWays: Int = 32,
nTLBBasePageSectors: Int = 4,
nTLBSuperpages: Int = 4,
tagECC: Option[String] = None,
dataECC: Option[String] = None,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,
nMMIOs: Int = 1,
blockBytes: Int = 64,
separateUncachedResp: Boolean = false,
acquireBeforeRelease: Boolean = false,
pipelineWayMux: Boolean = false,
clockGate: Boolean = false,
scratch: Option[BigInt] = None) extends L1CacheParams {
def tagCode: Code = Code.fromString(tagECC)
def dataCode: Code = Code.fromString(dataECC)
def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0)
def replacement = new RandomReplacement(nWays)
def silentDrop: Boolean = !acquireBeforeRelease
require((!scratch.isDefined || nWays == 1),
"Scratchpad only allowed in direct-mapped cache.")
require((!scratch.isDefined || nMSHRs == 0),
"Scratchpad only allowed in blocking cache.")
if (scratch.isEmpty)
require(isPow2(nSets), s"nSets($nSets) must be pow2")
}
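// An illustrative parameterization (values are examples only): selecting a data
// scratchpad forces a direct-mapped (nWays == 1), blocking (nMSHRs == 0) cache,
// per the requires above; with 256 sets of 64-byte blocks this places 16 KiB of
// scratchpad at the given base address.
object ExampleScratchpadDCacheParams {
  def apply(base: BigInt = BigInt(0x80000000L)): DCacheParams = DCacheParams(
    nSets   = 256,
    nWays   = 1,              // scratchpad => direct-mapped
    nMSHRs  = 0,              // scratchpad => blocking cache
    scratch = Some(base))
}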
trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters {
val cacheParams = tileParams.dcache.get
val cfg = cacheParams
def wordBits = coreDataBits
def wordBytes = coreDataBytes
def subWordBits = cacheParams.subWordBits.getOrElse(wordBits)
def subWordBytes = subWordBits / 8
def wordOffBits = log2Up(wordBytes)
def beatBytes = cacheBlockBytes / cacheDataBeats
def beatWords = beatBytes / wordBytes
def beatOffBits = log2Up(beatBytes)
def idxMSB = untagBits-1
def idxLSB = blockOffBits
def offsetmsb = idxLSB-1
def offsetlsb = wordOffBits
def rowWords = rowBits/wordBits
def doNarrowRead = coreDataBits * nWays % rowBits == 0
def eccBytes = cacheParams.dataECCBytes
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataCode.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only
def encRowBits = encDataBits*rowWords
def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed
def lrscBackoff = 3 // disallow LRSC reacquisition briefly
def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant
def nIOMSHRs = cacheParams.nMMIOs
def maxUncachedInFlight = cacheParams.nMMIOs
def dataScratchpadSize = cacheParams.dataScratchpadBytes
require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)")
if (!usingDataScratchpad)
require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)")
// would need offset addr for puts if data width < xlen
require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)")
}
abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module
with HasL1HellaCacheParameters
abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL1HellaCacheParameters
/** Bundle definitions for HellaCache interfaces */
trait HasCoreMemOp extends HasL1HellaCacheParameters {
val addr = UInt(coreMaxAddrBits.W)
val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W))
val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W)
val cmd = UInt(M_SZ.W)
val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W)
val signed = Bool()
val dprv = UInt(PRV.SZ.W)
val dv = Bool()
}
trait HasCoreData extends HasCoreParameters {
val data = UInt(coreDataBits.W)
val mask = UInt(coreDataBytes.W)
}
class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp {
val phys = Bool()
val no_resp = Bool() // The dcache may omit generating a response for this request
val no_alloc = Bool()
val no_xcpt = Bool()
}
class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreMemOp
with HasCoreData {
val replay = Bool()
val has_data = Bool()
val data_word_bypass = UInt(coreDataBits.W)
val data_raw = UInt(coreDataBits.W)
val store_data = UInt(coreDataBits.W)
}
class AlignmentExceptions extends Bundle {
val ld = Bool()
val st = Bool()
}
class HellaCacheExceptions extends Bundle {
val ma = new AlignmentExceptions
val pf = new AlignmentExceptions
val gf = new AlignmentExceptions
val ae = new AlignmentExceptions
}
class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData
class HellaCachePerfEvents extends Bundle {
val acquire = Bool()
val release = Bool()
val grant = Bool()
val tlbMiss = Bool()
val blocked = Bool()
val canAcceptStoreThenLoad = Bool()
val canAcceptStoreThenRMW = Bool()
val canAcceptLoadThenLoad = Bool()
val storeBufferEmptyAfterLoad = Bool()
val storeBufferEmptyAfterStore = Bool()
}
// interface between D$ and processor/DTLB
class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) {
val req = Decoupled(new HellaCacheReq)
val s1_kill = Output(Bool()) // kill previous cycle's req
val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req
val s2_nack = Input(Bool()) // req from two cycles ago is rejected
val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint)
val s2_kill = Output(Bool()) // kill req from two cycles ago
val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO
val s2_paddr = Input(UInt(paddrBits.W)) // translated address
val resp = Flipped(Valid(new HellaCacheResp))
val replay_next = Input(Bool())
val s2_xcpt = Input(new HellaCacheExceptions)
val s2_gpa = Input(UInt(vaddrBitsExtended.W))
val s2_gpa_is_pte = Input(Bool())
val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp)))
val ordered = Input(Bool())
val store_pending = Input(Bool()) // there is a store in a store buffer somewhere
val perf = Input(new HellaCachePerfEvents())
val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself?
val clock_enabled = Input(Bool()) // is D$ currently being clocked?
}
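// A minimal, hypothetical client of HellaCacheIO (not part of rocket-chip):
// issues an aligned full-word integer load and captures the returned data. It
// assumes a Parameters instance carrying a tile with a D$ configured, as any
// in-tile client would have; all client-driven outputs are tied off so the
// bundle elaborates cleanly, and the response arrives on resp a couple of
// cycles after the request fires.
class HellaCacheLoadExample(implicit p: Parameters) extends L1HellaCacheModule()(p) {
  val io = IO(new Bundle {
    val start = Input(Bool())
    val addr  = Input(UInt(coreMaxAddrBits.W))
    val data  = Valid(UInt(coreDataBits.W))
    val cache = new HellaCacheIO
  })
  io.cache.req.valid          := io.start
  io.cache.req.bits           := DontCare            // dprv/dv/idx/etc. left to the integrator
  io.cache.req.bits.addr      := io.addr
  io.cache.req.bits.tag       := 0.U
  io.cache.req.bits.cmd       := M_XRD                // integer load
  io.cache.req.bits.size      := log2Ceil(coreDataBytes).U // full xLen/8-byte access
  io.cache.req.bits.signed    := false.B
  io.cache.req.bits.phys      := false.B
  io.cache.req.bits.no_resp   := false.B
  io.cache.s1_kill            := false.B
  io.cache.s1_data            := DontCare             // only meaningful for stores
  io.cache.s2_kill            := false.B
  io.cache.keep_clock_enabled := true.B
  io.cache.uncached_resp.foreach(_.ready := true.B)
  io.data.valid := io.cache.resp.valid && io.cache.resp.bits.has_data
  io.data.bits  := io.cache.resp.bits.data
}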
/** Base classes for Diplomatic TL2 HellaCaches */
abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule
with HasNonDiplomaticTileParameters {
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache",
sourceId = IdRange(0, 1 max cfg.nMSHRs),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = cacheClientParameters ++ mmioClientParameters,
minLatency = 1,
requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))
val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val module: HellaCacheModule
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) {
val cpu = Flipped(new HellaCacheIO)
val ptw = new TLBPTWIO()
val errors = new DCacheErrors
val tlb_port = new DCacheTLBPort
}
class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters {
implicit val edge: TLEdgeOut = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new HellaCacheBundle)
val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle)
val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle)
dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals
dontTouch(io.cpu.s1_data)
require(rowBits == edge.bundle.dataBits)
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+
s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}")
}
}
/** Support overriding which HellaCache is instantiated */
case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply)
object HellaCacheFactory {
def apply(tile: BaseTile)(p: Parameters): HellaCache = {
if (tile.tileParams.dcache.get.nMSHRs == 0)
new DCache(tile.tileId, tile.crossing)(p)
else
new NonBlockingDCache(tile.tileId)(p)
}
}
/** Mix-ins for constructing tiles that have a HellaCache */
trait HasHellaCache { this: BaseTile =>
val module: HasHellaCacheModule
implicit val p: Parameters
var nDCachePorts = 0
lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p))
tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node
dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode }
dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode }
InModuleBody {
dcache.module.io.tlb_port := DontCare
}
}
trait HasHellaCacheModule {
val outer: HasHellaCache with HasTileParameters
implicit val p: Parameters
val dcachePorts = ListBuffer[HellaCacheIO]()
val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p))
outer.dcache.module.io.cpu <> dcacheArb.io.mem
}
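// An illustrative attachment pattern (hypothetical trait names, reusing the
// HellaCacheLoadExample sketch above): an extra D$ client bumps nDCachePorts on
// the lazy (outer) side so HellaCacheArbiter is sized for it, and pushes its
// HellaCacheIO into dcachePorts on the module side; the tile's module
// implementation is then expected to connect dcacheArb.io.requestor to
// dcachePorts in the same order.
trait HasDCacheLoadExample extends HasHellaCache { this: BaseTile =>
  nDCachePorts += 1                       // sized into the arbiter at module elaboration
}
trait HasDCacheLoadExampleModule extends HasHellaCacheModule {
  val outer: HasDCacheLoadExample with HasTileParameters
  val loadExample = Module(new HellaCacheLoadExample()(p))
  dcachePorts += loadExample.io.cache
}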
/** Metadata array used for all HellaCaches */
class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val coh = new ClientMetadata
val tag = UInt(tagBits.W)
}
object L1Metadata {
def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = {
val meta = Wire(new L1Metadata)
meta.tag := tag
meta.coh := coh
meta
}
}
class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val tag = UInt(tagBits.W)
}
class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) {
val data = new L1Metadata
}
class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Output(Vec(nWays, rstVal.cloneType))
})
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools
val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools
when (rst) { rst_cnt := rst_cnt+1.U }
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = rst || io.write.valid
when (wen) {
tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal)))
io.read.ready := !wen // so really this could be a 6T RAM
io.write.ready := !rst
}
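// A hypothetical lookup wrapper (illustrative only): reads every way of one set
// and reports which ways tag-match on the following cycle; the onReset thunk
// supplies the metadata value swept into all sets while rst_cnt counts up.
class MetaLookupExample(implicit p: Parameters) extends L1HellaCacheModule()(p) {
  val io = IO(new Bundle {
    val read   = Flipped(Valid(UInt(idxBits.W)))
    val tag    = Input(UInt(tagBits.W))
    val tag_eq = Output(UInt(nWays.W))    // per-way tag match, valid one cycle after read
  })
  def onReset() = L1Metadata(0.U, ClientMetadata.onReset)
  val meta = Module(new L1MetadataArray(onReset _))
  meta.io.read.valid       := io.read.valid
  meta.io.read.bits.idx    := io.read.bits
  meta.io.read.bits.way_en := ~0.U(nWays.W)           // read all ways
  meta.io.read.bits.tag    := io.tag
  meta.io.write.valid      := false.B                 // lookups only in this sketch
  meta.io.write.bits       := DontCare
  // a real cache would also check the coherence state; here we only compare tags
  io.tag_eq := VecInit(meta.io.resp.map(_.tag === RegNext(io.tag))).asUInt
}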
File ECC.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
abstract class Decoding
{
def uncorrected: UInt
def corrected: UInt
def correctable: Bool
def uncorrectable: Bool // If true, correctable should be ignored
def error = correctable || uncorrectable
}
abstract class Code
{
def canDetect: Boolean
def canCorrect: Boolean
def width(w0: Int): Int
/** Takes the unencoded width and returns a list of indices indicating which
* bits of the encoded value will be used for ecc
*/
def eccIndices(width: Int): Seq[Int]
/** Encode x to a codeword suitable for decode.
* If poison is true, the decoded value will report uncorrectable
* error despite uncorrected == corrected == x.
*/
def encode(x: UInt, poison: Bool = false.B): UInt
def decode(x: UInt): Decoding
/** Copy the bits in x to the right bit positions in an encoded word,
* so that x === decode(swizzle(x)).uncorrected; but don't generate
* the other code bits, so decode(swizzle(x)).error might be true.
* For codes for which this operation is not trivial, throw an
* UnsupportedOperationException. */
def swizzle(x: UInt): UInt
}
class IdentityCode extends Code
{
def canDetect = false
def canCorrect = false
def width(w0: Int) = w0
def eccIndices(width: Int) = Seq.empty[Int]
def encode(x: UInt, poison: Bool = false.B) = {
require (poison.isLit && poison.litValue == 0, "IdentityCode can not be poisoned")
x
}
def swizzle(x: UInt) = x
def decode(y: UInt) = new Decoding {
def uncorrected = y
def corrected = y
def correctable = false.B
def uncorrectable = false.B
}
}
class ParityCode extends Code
{
def canDetect = true
def canCorrect = false
def width(w0: Int) = w0+1
def eccIndices(w0: Int) = Seq(w0)
def encode(x: UInt, poison: Bool = false.B) = Cat(x.xorR ^ poison, x)
def swizzle(x: UInt) = Cat(false.B, x)
def decode(y: UInt) = new Decoding {
val uncorrected = y(y.getWidth-2,0)
val corrected = uncorrected
val correctable = false.B
val uncorrectable = y.xorR
}
}
class SECCode extends Code
{
def canDetect = true
def canCorrect = true
// SEC codes may or may not be poisonous depending on the length
// If the code is perfect, every non-codeword is correctable
def poisonous(n: Int) = !isPow2(n+1)
def width(k: Int) = {
val m = log2Floor(k) + 1
k + m + (if((1 << m) < m+k+1) 1 else 0)
}
def eccIndices(w0: Int) = {
(0 until width(w0)).collect {
case i if i >= w0 => i
}
}
def swizzle(x: UInt) = {
val k = x.getWidth
val n = width(k)
Cat(0.U((n-k).W), x)
}
// An (n=16, k=11) Hamming code is naturally encoded as:
// PPxPxxxPxxxxxxxP where P are parity bits and x are data
// Indexes typically start at 1, because then the P are on powers of two
// In systematic coding, you put all the data in the front:
// xxxxxxxxxxxPPPPP
// Indexes typically start at 0, because Computer Science
// For sanity when reading SRAMs, you want systematic form.
private def impl(n: Int, k: Int) = {
require (n >= 3 && k >= 1 && !isPow2(n))
val hamm2sys = IndexedSeq.tabulate(n+1) { i =>
if (i == 0) {
n /* undefined */
} else if (isPow2(i)) {
k + log2Ceil(i)
} else {
i - 1 - log2Ceil(i)
}
}
val sys2hamm = hamm2sys.zipWithIndex.sortBy(_._1).map(_._2).toIndexedSeq
def syndrome(j: Int) = {
val bit = 1 << j
("b" + Seq.tabulate(n) { i =>
if ((sys2hamm(i) & bit) != 0) "1" else "0"
}.reverse.mkString).U
}
(hamm2sys, sys2hamm, syndrome _)
}
def encode(x: UInt, poison: Bool = false.B) = {
val k = x.getWidth
val n = width(k)
val (_, _, syndrome) = impl(n, k)
require ((poison.isLit && poison.litValue == 0) || poisonous(n), s"SEC code of length ${n} cannot be poisoned")
/* By setting the entire syndrome on poison, the corrected bit falls off the end of the code */
val syndromeUInt = VecInit.tabulate(n-k) { j => (syndrome(j)(k-1, 0) & x).xorR ^ poison }.asUInt
Cat(syndromeUInt, x)
}
def decode(y: UInt) = new Decoding {
val n = y.getWidth
val k = n - log2Ceil(n)
val (_, sys2hamm, syndrome) = impl(n, k)
val syndromeUInt = VecInit.tabulate(n-k) { j => (syndrome(j) & y).xorR }.asUInt
val hammBadBitOH = UIntToOH(syndromeUInt, n+1)
val sysBadBitOH = VecInit.tabulate(k) { i => hammBadBitOH(sys2hamm(i)) }.asUInt
val uncorrected = y(k-1, 0)
val corrected = uncorrected ^ sysBadBitOH
val correctable = syndromeUInt.orR
val uncorrectable = if (poisonous(n)) { syndromeUInt > n.U } else { false.B }
}
}
class SECDEDCode extends Code
{
def canDetect = true
def canCorrect = true
private val sec = new SECCode
private val par = new ParityCode
def width(k: Int) = sec.width(k)+1
def eccIndices(w0: Int) = {
(0 until width(w0)).collect {
case i if i >= w0 => i
}
}
def encode(x: UInt, poison: Bool = false.B) = {
// toggling two bits ensures the error is uncorrectable
// to ensure corrected == uncorrected, we pick one redundant
// bit from SEC (the highest); correcting it does not affect
// corrected == uncorrected. the second toggled bit is the
// parity bit, which also does not appear in the decoding
val toggle_lo = Cat(poison.asUInt, poison.asUInt)
val toggle_hi = toggle_lo << (sec.width(x.getWidth)-1)
par.encode(sec.encode(x)) ^ toggle_hi
}
def swizzle(x: UInt) = par.swizzle(sec.swizzle(x))
def decode(x: UInt) = new Decoding {
val secdec = sec.decode(x(x.getWidth-2,0))
val pardec = par.decode(x)
val uncorrected = secdec.uncorrected
val corrected = secdec.corrected
val correctable = pardec.uncorrectable
val uncorrectable = !pardec.uncorrectable && secdec.correctable
}
}
object ErrGen
{
// generate a 1-bit error with approximate probability 2^-f
def apply(width: Int, f: Int): UInt = {
require(width > 0 && f >= 0 && log2Up(width) + f <= 16)
UIntToOH(LFSR(16)(log2Up(width)+f-1,0))(width-1,0)
}
def apply(x: UInt, f: Int): UInt = x ^ apply(x.getWidth, f)
}
trait CanHaveErrors extends Bundle {
val correctable: Option[ValidIO[UInt]]
val uncorrectable: Option[ValidIO[UInt]]
}
case class ECCParams(
bytes: Int = 1,
code: Code = new IdentityCode,
notifyErrors: Boolean = false,
)
object Code {
def fromString(s: Option[String]): Code = fromString(s.getOrElse("none"))
def fromString(s: String): Code = s.toLowerCase match {
case "none" => new IdentityCode
case "identity" => new IdentityCode
case "parity" => new ParityCode
case "sec" => new SECCode
case "secded" => new SECDEDCode
case _ => throw new IllegalArgumentException("Unknown ECC type")
}
}
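// A minimal sketch tying the pieces together (hypothetical module): pick a code
// with Code.fromString (as DCacheParams.tagCode/dataCode do), store the encoded
// word, and run decode() on the way back out. For scale, new SECDEDCode needs
// width(8) = 13 bits per byte and width(64) = 72 bits per 64-bit word, the
// classic (72,64) SEC-DED geometry.
class EccRoundTripExample(w: Int, eccType: String) extends Module {
  val code = Code.fromString(eccType)
  val io = IO(new Bundle {
    val in            = Input(UInt(w.W))
    val stored        = Output(UInt(code.width(w).W)) // what the SRAM would hold
    val out           = Output(UInt(w.W))
    val correctable   = Output(Bool())
    val uncorrectable = Output(Bool())
  })
  val enc = code.encode(io.in)
  val dec = code.decode(enc)
  io.stored        := enc
  io.out           := dec.corrected
  io.correctable   := dec.correctable
  io.uncorrectable := dec.uncorrectable
}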
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class ECCTest(k: Int, timeout: Int = 500000) extends UnitTest(timeout) {
val code = new SECDEDCode
val n = code.width(k)
// Brute force the decode space
val test = RegInit(0.U((n+1).W))
val last = test(n)
test := test + !last
io.finished := RegNext(last, false.B)
// Confirm the decoding matches the encoding
val decoded = code.decode(test(n-1, 0))
val recoded = code.encode(decoded.corrected)
val distance = PopCount(recoded ^ test)
// Count the cases
val correct = RegInit(0.U(n.W))
val correctable = RegInit(0.U(n.W))
val uncorrectable = RegInit(0.U(n.W))
when (!last) {
when (decoded.uncorrectable) {
assert (distance >= 2.U) // uncorrectable
uncorrectable := uncorrectable + 1.U
} .elsewhen (decoded.correctable) {
assert (distance(0)) // correctable => odd bit errors
correctable := correctable + 1.U
} .otherwise {
assert (distance === 0.U) // correct
assert (decoded.uncorrected === decoded.corrected)
correct := correct + 1.U
}
}
// Expected number of each case
val nCodes = BigInt(1) << n
val nCorrect = BigInt(1) << k
val nCorrectable = nCodes / 2
val nUncorrectable = nCodes - nCorrectable - nCorrect
when (last) {
assert (correct === nCorrect.U)
assert (correctable === nCorrectable.U)
assert (uncorrectable === nUncorrectable.U)
}
}
File Consts.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket.constants
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
trait ScalarOpConstants {
val SZ_BR = 3
def BR_X = BitPat("b???")
def BR_EQ = 0.U(3.W)
def BR_NE = 1.U(3.W)
def BR_J = 2.U(3.W)
def BR_N = 3.U(3.W)
def BR_LT = 4.U(3.W)
def BR_GE = 5.U(3.W)
def BR_LTU = 6.U(3.W)
def BR_GEU = 7.U(3.W)
def A1_X = BitPat("b??")
def A1_ZERO = 0.U(2.W)
def A1_RS1 = 1.U(2.W)
def A1_PC = 2.U(2.W)
def A1_RS1SHL = 3.U(2.W)
def IMM_X = BitPat("b???")
def IMM_S = 0.U(3.W)
def IMM_SB = 1.U(3.W)
def IMM_U = 2.U(3.W)
def IMM_UJ = 3.U(3.W)
def IMM_I = 4.U(3.W)
def IMM_Z = 5.U(3.W)
def A2_X = BitPat("b???")
def A2_ZERO = 0.U(3.W)
def A2_SIZE = 1.U(3.W)
def A2_RS2 = 2.U(3.W)
def A2_IMM = 3.U(3.W)
def A2_RS2OH = 4.U(3.W)
def A2_IMMOH = 5.U(3.W)
def X = BitPat("b?")
def N = BitPat("b0")
def Y = BitPat("b1")
val SZ_DW = 1
def DW_X = X
def DW_32 = false.B
def DW_64 = true.B
def DW_XPR = DW_64
}
trait MemoryOpConstants {
val NUM_XA_OPS = 9
val M_SZ = 5
def M_X = BitPat("b?????");
def M_XRD = "b00000".U; // int load
def M_XWR = "b00001".U; // int store
def M_PFR = "b00010".U; // prefetch with intent to read
def M_PFW = "b00011".U; // prefetch with intent to write
def M_XA_SWAP = "b00100".U
def M_FLUSH_ALL = "b00101".U // flush all lines
def M_XLR = "b00110".U
def M_XSC = "b00111".U
def M_XA_ADD = "b01000".U
def M_XA_XOR = "b01001".U
def M_XA_OR = "b01010".U
def M_XA_AND = "b01011".U
def M_XA_MIN = "b01100".U
def M_XA_MAX = "b01101".U
def M_XA_MINU = "b01110".U
def M_XA_MAXU = "b01111".U
def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions
def M_PWR = "b10001".U // partial (masked) store
def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions
def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions
def M_SFENCE = "b10100".U // SFENCE.VMA
def M_HFENCEV = "b10101".U // HFENCE.VVMA
def M_HFENCEG = "b10110".U // HFENCE.GVMA
def M_WOK = "b10111".U // check write permissions but don't perform a write
def M_HLVX = "b10000".U // HLVX instruction
def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND)
def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU)
def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd)
def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW
def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd)
def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd)
def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR
}
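// Hedged usage sketch (not part of the original file): a tiny classifier that exercises
// the predicates above on an incoming memory command. The module and port names are
// illustrative assumptions.
class MemCmdClassifierExample extends Module with MemoryOpConstants {
  val io = IO(new Bundle {
    val cmd = Input(UInt(M_SZ.W))
    val isLoad = Output(Bool())
    val isStore = Output(Bool())
    val isAtomic = Output(Bool())
    val needsWritePerm = Output(Bool())
  })
  io.isLoad := isRead(io.cmd)
  io.isStore := isWrite(io.cmd)
  io.isAtomic := isAMO(io.cmd)
  io.needsWritePerm := isWriteIntent(io.cmd) // stores, AMOs, LR, and prefetch-with-intent-to-write
}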
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
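// Hedged sketch (not part of the original file): a raw implementation that takes an
// explicit clock/reset pair at its IO and forwards it to the lazy children, as the
// comments on childClock/childReset above recommend. The class name and port names are
// illustrative assumptions.
class ExternallyClockedRawImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
  override def provideImplicitClockToLazyChildren: Boolean = true
  val io_clock = IO(Input(Clock()))
  val io_reset = IO(Input(Bool()))
  // last connect wins: override the disabled defaults driven by LazyRawModuleImp
  childClock := io_clock
  childReset := io_reset
}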
File TLB.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.devices.debug.DebugModuleKey
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile.{CoreModule, CoreBundle}
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.{OptimizationBarrier, SetAssocLRU, PseudoLRU, PopCountAtLeast, property}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
import freechips.rocketchip.util.IntToAugmentedInt
import freechips.rocketchip.util.UIntToAugmentedUInt
import freechips.rocketchip.util.UIntIsOneOf
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.SeqBoolBitwiseOps
case object ASIdBits extends Field[Int](0)
case object VMIdBits extends Field[Int](0)
/** =SFENCE=
* rs1 rs2
* {{{
* 0 0 -> flush All
* 0 1 -> flush by ASID
* 1 1 -> flush by ADDR
* 1 0 -> flush by ADDR and ASID
* }}}
* {{{
* If rs1=x0 and rs2=x0, the fence orders all reads and writes made to any level of the page tables, for all address spaces.
* If rs1=x0 and rs2!=x0, the fence orders all reads and writes made to any level of the page tables, but only for the address space identified by integer register rs2. Accesses to global mappings (see Section 4.3.1) are not ordered.
* If rs1!=x0 and rs2=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for all address spaces.
* If rs1!=x0 and rs2!=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for the address space identified by integer register rs2. Accesses to global mappings are not ordered.
* }}}
*/
class SFenceReq(implicit p: Parameters) extends CoreBundle()(p) {
val rs1 = Bool()
val rs2 = Bool()
val addr = UInt(vaddrBits.W)
val asid = UInt((asIdBits max 1).W) // TODO zero-width
val hv = Bool()
val hg = Bool()
}
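// Hedged sketch (not part of the original file): decoding the rs1/rs2 cases from the
// table above into flush kinds. Assumes a Parameters instance carrying the usual
// core/tile keys; the module and port names are illustrative.
class SFenceDecodeExample(implicit p: Parameters) extends Module {
  val io = IO(new Bundle {
    val req = Input(new SFenceReq)
    val flushAll = Output(Bool())    // rs1=0, rs2=0
    val flushByAsid = Output(Bool()) // rs1=0, rs2=1
    val flushByAddr = Output(Bool()) // rs1=1 (rs2 further restricts to one ASID)
  })
  io.flushAll := !io.req.rs1 && !io.req.rs2
  io.flushByAsid := !io.req.rs1 && io.req.rs2
  io.flushByAddr := io.req.rs1
}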
class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) {
/** request address from CPU. */
val vaddr = UInt(vaddrBitsExtended.W)
/** don't lookup TLB, bypass vaddr as paddr */
val passthrough = Bool()
/** granularity */
val size = UInt(log2Ceil(lgMaxSize + 1).W)
/** memory command. */
val cmd = Bits(M_SZ.W)
val prv = UInt(PRV.SZ.W)
/** virtualization mode */
val v = Bool()
}
class TLBExceptions extends Bundle {
val ld = Bool()
val st = Bool()
val inst = Bool()
}
class TLBResp(lgMaxSize: Int = 3)(implicit p: Parameters) extends CoreBundle()(p) {
// lookup responses
val miss = Bool()
/** physical address */
val paddr = UInt(paddrBits.W)
val gpa = UInt(vaddrBitsExtended.W)
val gpa_is_pte = Bool()
/** page fault exception */
val pf = new TLBExceptions
/** guest page fault exception */
val gf = new TLBExceptions
/** access exception */
val ae = new TLBExceptions
/** misaligned access exception */
val ma = new TLBExceptions
/** if this address is cacheable */
val cacheable = Bool()
/** if caches must allocate this address */
val must_alloc = Bool()
/** if this address is prefetchable for caches*/
val prefetchable = Bool()
/** size/cmd of request that generated this response*/
val size = UInt(log2Ceil(lgMaxSize + 1).W)
val cmd = UInt(M_SZ.W)
}
class TLBEntryData(implicit p: Parameters) extends CoreBundle()(p) {
val ppn = UInt(ppnBits.W)
/** pte.u user */
val u = Bool()
/** pte.g global */
val g = Bool()
/** access exception.
* D$ -> PTW -> TLB AE
* Alignment failed.
*/
val ae_ptw = Bool()
val ae_final = Bool()
val ae_stage2 = Bool()
/** page fault */
val pf = Bool()
/** guest page fault */
val gf = Bool()
/** supervisor write */
val sw = Bool()
/** supervisor execute */
val sx = Bool()
/** supervisor read */
val sr = Bool()
/** hypervisor write */
val hw = Bool()
/** hypervisor execute */
val hx = Bool()
/** hypervisor read */
val hr = Bool()
/** prot_w */
val pw = Bool()
/** prot_x */
val px = Bool()
/** prot_r */
val pr = Bool()
/** PutPartial */
val ppp = Bool()
/** AMO logical */
val pal = Bool()
/** AMO arithmetic */
val paa = Bool()
/** get/put effects */
val eff = Bool()
/** cacheable */
val c = Bool()
/** fragmented_superpage support */
val fragmented_superpage = Bool()
}
/** basic cell for TLB data */
class TLBEntry(val nSectors: Int, val superpage: Boolean, val superpageOnly: Boolean)(implicit p: Parameters) extends CoreBundle()(p) {
require(nSectors == 1 || !superpage)
require(!superpageOnly || superpage)
val level = UInt(log2Ceil(pgLevels).W)
/** use vpn as tag */
val tag_vpn = UInt(vpnBits.W)
/** tag in virtualization mode */
val tag_v = Bool()
/** entry data */
val data = Vec(nSectors, UInt(new TLBEntryData().getWidth.W))
/** valid bit */
val valid = Vec(nSectors, Bool())
/** returns all entry data in this entry */
def entry_data = data.map(_.asTypeOf(new TLBEntryData))
/** returns the index of sector */
private def sectorIdx(vpn: UInt) = vpn.extract(nSectors.log2-1, 0)
/** returns the entry data matched with this vpn*/
def getData(vpn: UInt) = OptimizationBarrier(data(sectorIdx(vpn)).asTypeOf(new TLBEntryData))
/** returns whether a sector hits */
def sectorHit(vpn: UInt, virtual: Bool) = valid.orR && sectorTagMatch(vpn, virtual)
/** returns whether tag matches vpn */
def sectorTagMatch(vpn: UInt, virtual: Bool) = (((tag_vpn ^ vpn) >> nSectors.log2) === 0.U) && (tag_v === virtual)
/** returns hit signal */
def hit(vpn: UInt, virtual: Bool): Bool = {
if (superpage && usingVM) {
var tagMatch = valid.head && (tag_v === virtual)
for (j <- 0 until pgLevels) {
val base = (pgLevels - 1 - j) * pgLevelBits
val n = pgLevelBits + (if (j == 0) hypervisorExtraAddrBits else 0)
val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B
tagMatch = tagMatch && (ignore || (tag_vpn ^ vpn)(base + n - 1, base) === 0.U)
}
tagMatch
} else {
val idx = sectorIdx(vpn)
valid(idx) && sectorTagMatch(vpn, virtual)
}
}
/** returns the ppn of the input TLBEntryData */
def ppn(vpn: UInt, data: TLBEntryData) = {
val supervisorVPNBits = pgLevels * pgLevelBits
if (superpage && usingVM) {
var res = data.ppn >> pgLevelBits*(pgLevels - 1)
for (j <- 1 until pgLevels) {
val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B
res = Cat(res, (Mux(ignore, vpn, 0.U) | data.ppn)(supervisorVPNBits - j*pgLevelBits - 1, supervisorVPNBits - (j + 1)*pgLevelBits))
}
res
} else {
data.ppn
}
}
/** does the refill
*
* find the target entry with vpn tag
* and replace the target entry with the input entry data
*/
def insert(vpn: UInt, virtual: Bool, level: UInt, entry: TLBEntryData): Unit = {
this.tag_vpn := vpn
this.tag_v := virtual
this.level := level.extract(log2Ceil(pgLevels - superpageOnly.toInt)-1, 0)
val idx = sectorIdx(vpn)
valid(idx) := true.B
data(idx) := entry.asUInt
}
def invalidate(): Unit = { valid.foreach(_ := false.B) }
def invalidate(virtual: Bool): Unit = {
for ((v, e) <- valid zip entry_data)
when (tag_v === virtual) { v := false.B }
}
def invalidateVPN(vpn: UInt, virtual: Bool): Unit = {
if (superpage) {
when (hit(vpn, virtual)) { invalidate() }
} else {
when (sectorTagMatch(vpn, virtual)) {
for (((v, e), i) <- (valid zip entry_data).zipWithIndex)
when (tag_v === virtual && i.U === sectorIdx(vpn)) { v := false.B }
}
}
// For fragmented superpage mappings, we assume the worst (largest)
// case, and zap entries whose most-significant VPNs match
when (((tag_vpn ^ vpn) >> (pgLevelBits * (pgLevels - 1))) === 0.U) {
for ((v, e) <- valid zip entry_data)
when (tag_v === virtual && e.fragmented_superpage) { v := false.B }
}
}
def invalidateNonGlobal(virtual: Bool): Unit = {
for ((v, e) <- valid zip entry_data)
when (tag_v === virtual && !e.g) { v := false.B }
}
}
/** TLB config
*
* @param nSets the number of sets of sectored PTE entries, follows [[ICacheParams.nSets]]
* @param nWays the total number of ways of sectored PTE entries, follows [[ICacheParams.nWays]]
* @param nSectors the number of PTEs held within a single sectored TLBEntry
* @param nSuperpageEntries the number of SuperpageEntries
*/
case class TLBConfig(
nSets: Int,
nWays: Int,
nSectors: Int = 4,
nSuperpageEntries: Int = 4)
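// Hedged sketch (not part of the original file): an illustrative configuration, not a
// default taken from any published core config. With nWays = 4 and nSectors = 4, each
// set holds one sectored TLBEntry, so the sectored array provides
// nSets * (nWays / nSectors) * nSectors = 32 PTE slots, plus 4 fully-associative
// superpage entries.
object TLBConfigExample {
  val dtlbCfg = TLBConfig(nSets = 8, nWays = 4, nSectors = 4, nSuperpageEntries = 4)
}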
/** =Overview=
* [[TLB]] is a TLB template which contains PMA logic and PMP checker.
*
* The TLB caches PTEs and accelerates the address translation process.
* On a TLB miss, it asks the PTW (L2TLB) to perform a page table walk.
* PMP and PMA checks are performed during translation, and an exception is raised if any check fails.
*
* ==Cache Structure==
* - Sectored Entry (PTE)
* - set-associative or direct-mapped
* - nsets = [[TLBConfig.nSets]]
* - nways = [[TLBConfig.nWays]] / [[TLBConfig.nSectors]]
* - PTEEntry( sectors = [[TLBConfig.nSectors]] )
* - LRU(if set-associative)
*
* - Superpage Entry(superpage PTE)
* - fully associative
* - nsets = [[TLBConfig.nSuperpageEntries]]
* - PTEEntry(sectors = 1)
* - PseudoLRU
*
* - Special Entry(PTE across PMP)
* - nsets = 1
* - PTEEntry(sectors = 1)
*
* ==Address structure==
* {{{
* |vaddr |
* |ppn/vpn | pgIndex |
* | | |
* | |nSets |nSector | |}}}
*
* ==State Machine==
* {{{
* s_ready: ready to accept request from CPU.
* s_request: on an L1TLB (this) miss, send a request to the PTW (L2TLB).
* s_wait: wait for PTW to refill L1TLB.
* s_wait_invalidate: L1TLB is waiting for a response from the PTW, but will discard (invalidate) that response when it arrives.}}}
*
* ==PMP==
* pmp check
* - special_entry: always check
* - other entry: check on refill
*
* ==Note==
* PMA consumes diplomacy parameters to generate the physical memory attribute checking logic.
*
* BOOM uses the Rocket ITLB together with its own DTLB.
*
* Accelerators:{{{
* sha3: DTLB
* gemmini: DTLB
* hwacha: DTLB*2+ITLB}}}
* @param instruction true for ITLB, false for DTLB
* @param lgMaxSize @todo appears to be the log2 of the maximum access size (granularity)
* @param cfg [[TLBConfig]]
* @param edge collect SoC metadata.
*/
class TLB(instruction: Boolean, lgMaxSize: Int, cfg: TLBConfig)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) {
override def desiredName = if (instruction) "ITLB" else "DTLB"
val io = IO(new Bundle {
/** request from Core */
val req = Flipped(Decoupled(new TLBReq(lgMaxSize)))
/** response to Core */
val resp = Output(new TLBResp(lgMaxSize))
/** SFence Input */
val sfence = Flipped(Valid(new SFenceReq))
/** IO to PTW */
val ptw = new TLBPTWIO
/** suppress a TLB refill, one cycle after a miss */
val kill = Input(Bool())
})
io.ptw.customCSRs := DontCare
val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits)
val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits)
/** index for sectored_Entry */
val memIdx = vpn.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2)
/** TLB Entry */
val sectored_entries = Reg(Vec(cfg.nSets, Vec(cfg.nWays / cfg.nSectors, new TLBEntry(cfg.nSectors, false, false))))
/** Superpage Entry */
val superpage_entries = Reg(Vec(cfg.nSuperpageEntries, new TLBEntry(1, true, true)))
/** Special Entry
*
* If the PMP granularity is smaller than the page size, an additional "special" entry is needed to manage PMP checks.
*/
val special_entry = (!pageGranularityPMPs).option(Reg(new TLBEntry(1, true, false)))
def ordinary_entries = sectored_entries(memIdx) ++ superpage_entries
def all_entries = ordinary_entries ++ special_entry
def all_real_entries = sectored_entries.flatten ++ superpage_entries ++ special_entry
val s_ready :: s_request :: s_wait :: s_wait_invalidate :: Nil = Enum(4)
val state = RegInit(s_ready)
// use vpn as refill_tag
val r_refill_tag = Reg(UInt(vpnBits.W))
val r_superpage_repl_addr = Reg(UInt(log2Ceil(superpage_entries.size).W))
val r_sectored_repl_addr = Reg(UInt(log2Ceil(sectored_entries.head.size).W))
val r_sectored_hit = Reg(Valid(UInt(log2Ceil(sectored_entries.head.size).W)))
val r_superpage_hit = Reg(Valid(UInt(log2Ceil(superpage_entries.size).W)))
val r_vstage1_en = Reg(Bool())
val r_stage2_en = Reg(Bool())
val r_need_gpa = Reg(Bool())
val r_gpa_valid = Reg(Bool())
val r_gpa = Reg(UInt(vaddrBits.W))
val r_gpa_vpn = Reg(UInt(vpnBits.W))
val r_gpa_is_pte = Reg(Bool())
/** privilege mode */
val priv = io.req.bits.prv
val priv_v = usingHypervisor.B && io.req.bits.v
val priv_s = priv(0)
// user mode and supervisor mode
val priv_uses_vm = priv <= PRV.S.U
val satp = Mux(priv_v, io.ptw.vsatp, io.ptw.ptbr)
val stage1_en = usingVM.B && satp.mode(satp.mode.getWidth-1)
/** VS-stage translation enable */
val vstage1_en = usingHypervisor.B && priv_v && io.ptw.vsatp.mode(io.ptw.vsatp.mode.getWidth-1)
/** G-stage translation enable */
val stage2_en = usingHypervisor.B && priv_v && io.ptw.hgatp.mode(io.ptw.hgatp.mode.getWidth-1)
/** Enable Virtual Memory when:
* 1. statically configured
* 1. satp highest bits enabled
* i. RV32:
* - 0 -> Bare
* - 1 -> SV32
* i. RV64:
* - 0000 -> Bare
* - 1000 -> SV39
* - 1001 -> SV48
* - 1010 -> SV57
* - 1011 -> SV64
* 1. In virtualization mode, vsatp highest bits enabled
* 1. priv mode in U and S.
* 1. in H & M mode, disable VM.
* 1. no passthrough(micro-arch defined.)
*
* @see RV-priv spec 4.1.11 Supervisor Address Translation and Protection (satp) Register
* @see RV-priv spec 8.2.18 Virtual Supervisor Address Translation and Protection Register (vsatp)
*/
val vm_enabled = (stage1_en || stage2_en) && priv_uses_vm && !io.req.bits.passthrough
// flush guest entries on vsatp.MODE Bare <-> SvXX transitions
val v_entries_use_stage1 = RegInit(false.B)
val vsatp_mode_mismatch = priv_v && (vstage1_en =/= v_entries_use_stage1) && !io.req.bits.passthrough
// share a single physical memory attribute checker (unshare if critical path)
val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0)
/** refill signal */
val do_refill = usingVM.B && io.ptw.resp.valid
/** sfence invalidate refill */
val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate) || io.sfence.valid
// PMP
val mpu_ppn = Mux(do_refill, refill_ppn,
Mux(vm_enabled && special_entry.nonEmpty.B, special_entry.map(e => e.ppn(vpn, e.getData(vpn))).getOrElse(0.U), io.req.bits.vaddr >> pgIdxBits))
val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
val mpu_priv = Mux[UInt](usingVM.B && (do_refill || io.req.bits.passthrough /* PTW */), PRV.S.U, Cat(io.ptw.status.debug, priv))
val pmp = Module(new PMPChecker(lgMaxSize))
pmp.io.addr := mpu_physaddr
pmp.io.size := io.req.bits.size
pmp.io.pmp := (io.ptw.pmp: Seq[PMP])
pmp.io.prv := mpu_priv
val pma = Module(new PMAChecker(edge.manager)(p))
pma.io.paddr := mpu_physaddr
// todo: using DataScratchpad doesn't support cacheable.
val cacheable = pma.io.resp.cacheable && (instruction || !usingDataScratchpad).B
val homogeneous = TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), BigInt(1) << pgIdxBits, 1 << lgMaxSize)(mpu_physaddr).homogeneous
// Deny accesses to the Debug Module address range (program buffer) unless the hart is in debug mode
val deny_access_to_debug = mpu_priv <= PRV.M.U && p(DebugModuleKey).map(dmp => dmp.address.contains(mpu_physaddr)).getOrElse(false.B)
val prot_r = pma.io.resp.r && !deny_access_to_debug && pmp.io.r
val prot_w = pma.io.resp.w && !deny_access_to_debug && pmp.io.w
val prot_pp = pma.io.resp.pp
val prot_al = pma.io.resp.al
val prot_aa = pma.io.resp.aa
val prot_x = pma.io.resp.x && !deny_access_to_debug && pmp.io.x
val prot_eff = pma.io.resp.eff
// hit check
val sector_hits = sectored_entries(memIdx).map(_.sectorHit(vpn, priv_v))
val superpage_hits = superpage_entries.map(_.hit(vpn, priv_v))
val hitsVec = all_entries.map(vm_enabled && _.hit(vpn, priv_v))
val real_hits = hitsVec.asUInt
val hits = Cat(!vm_enabled, real_hits)
// use ptw response to refill
// permission bit arrays
when (do_refill) {
val pte = io.ptw.resp.bits.pte
val refill_v = r_vstage1_en || r_stage2_en
val newEntry = Wire(new TLBEntryData)
newEntry.ppn := pte.ppn
newEntry.c := cacheable
newEntry.u := pte.u
newEntry.g := pte.g && pte.v
newEntry.ae_ptw := io.ptw.resp.bits.ae_ptw
newEntry.ae_final := io.ptw.resp.bits.ae_final
newEntry.ae_stage2 := io.ptw.resp.bits.ae_final && io.ptw.resp.bits.gpa_is_pte && r_stage2_en
newEntry.pf := io.ptw.resp.bits.pf
newEntry.gf := io.ptw.resp.bits.gf
newEntry.hr := io.ptw.resp.bits.hr
newEntry.hw := io.ptw.resp.bits.hw
newEntry.hx := io.ptw.resp.bits.hx
newEntry.sr := pte.sr()
newEntry.sw := pte.sw()
newEntry.sx := pte.sx()
newEntry.pr := prot_r
newEntry.pw := prot_w
newEntry.px := prot_x
newEntry.ppp := prot_pp
newEntry.pal := prot_al
newEntry.paa := prot_aa
newEntry.eff := prot_eff
newEntry.fragmented_superpage := io.ptw.resp.bits.fragmented_superpage
// refill special_entry
when (special_entry.nonEmpty.B && !io.ptw.resp.bits.homogeneous) {
special_entry.foreach(_.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry))
}.elsewhen (io.ptw.resp.bits.level < (pgLevels-1).U) {
val waddr = Mux(r_superpage_hit.valid && usingHypervisor.B, r_superpage_hit.bits, r_superpage_repl_addr)
for ((e, i) <- superpage_entries.zipWithIndex) when (r_superpage_repl_addr === i.U) {
e.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry)
when (invalidate_refill) { e.invalidate() }
}
// refill sectored_hit
}.otherwise {
val r_memIdx = r_refill_tag.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2)
val waddr = Mux(r_sectored_hit.valid, r_sectored_hit.bits, r_sectored_repl_addr)
for ((e, i) <- sectored_entries(r_memIdx).zipWithIndex) when (waddr === i.U) {
when (!r_sectored_hit.valid) { e.invalidate() }
e.insert(r_refill_tag, refill_v, 0.U, newEntry)
when (invalidate_refill) { e.invalidate() }
}
}
r_gpa_valid := io.ptw.resp.bits.gpa.valid
r_gpa := io.ptw.resp.bits.gpa.bits
r_gpa_is_pte := io.ptw.resp.bits.gpa_is_pte
}
// get all entries data.
val entries = all_entries.map(_.getData(vpn))
val normal_entries = entries.take(ordinary_entries.size)
// parallel query PPN from [[all_entries]], if VM not enabled return VPN instead
val ppn = Mux1H(hitsVec :+ !vm_enabled, (all_entries zip entries).map{ case (entry, data) => entry.ppn(vpn, data) } :+ vpn(ppnBits-1, 0))
val nPhysicalEntries = 1 + special_entry.size
// access-exception bits recorded by the PTW during the walk, one bit per entry
val ptw_ae_array = Cat(false.B, entries.map(_.ae_ptw).asUInt)
val final_ae_array = Cat(false.B, entries.map(_.ae_final).asUInt)
val ptw_pf_array = Cat(false.B, entries.map(_.pf).asUInt)
val ptw_gf_array = Cat(false.B, entries.map(_.gf).asUInt)
val sum = Mux(priv_v, io.ptw.gstatus.sum, io.ptw.status.sum)
// if in hypervisor/machine mode, cannot read/write user entries.
// if in supervisor/user mode, "If the SUM bit in the sstatus register is set, supervisor mode software may also access pages with U=1." (from spec)
val priv_rw_ok = Mux(!priv_s || sum, entries.map(_.u).asUInt, 0.U) | Mux(priv_s, ~entries.map(_.u).asUInt, 0.U)
// when priv_s is set (S/M mode), only non-user pages are executable;
// otherwise (U mode), only user pages are executable.
val priv_x_ok = Mux(priv_s, ~entries.map(_.u).asUInt, entries.map(_.u).asUInt)
val stage1_bypass = Fill(entries.size, usingHypervisor.B) & (Fill(entries.size, !stage1_en) | entries.map(_.ae_stage2).asUInt)
val mxr = io.ptw.status.mxr | Mux(priv_v, io.ptw.gstatus.mxr, false.B)
// "The vsstatus field MXR, which makes execute-only pages readable, only overrides VS-stage page protection.(from spec)"
val r_array = Cat(true.B, (priv_rw_ok & (entries.map(_.sr).asUInt | Mux(mxr, entries.map(_.sx).asUInt, 0.U))) | stage1_bypass)
val w_array = Cat(true.B, (priv_rw_ok & entries.map(_.sw).asUInt) | stage1_bypass)
val x_array = Cat(true.B, (priv_x_ok & entries.map(_.sx).asUInt) | stage1_bypass)
val stage2_bypass = Fill(entries.size, !stage2_en)
val hr_array = Cat(true.B, entries.map(_.hr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.hx).asUInt, 0.U) | stage2_bypass)
val hw_array = Cat(true.B, entries.map(_.hw).asUInt | stage2_bypass)
val hx_array = Cat(true.B, entries.map(_.hx).asUInt | stage2_bypass)
// These arrays have one bit per TLB entry.
// user mode can read: PMA OK, TLB OK, AE OK
val pr_array = Cat(Fill(nPhysicalEntries, prot_r), normal_entries.map(_.pr).asUInt) & ~(ptw_ae_array | final_ae_array)
// user mode can write: PMA OK, TLB OK, AE OK
val pw_array = Cat(Fill(nPhysicalEntries, prot_w), normal_entries.map(_.pw).asUInt) & ~(ptw_ae_array | final_ae_array)
// user mode can execute: PMA OK, TLB OK, AE OK
val px_array = Cat(Fill(nPhysicalEntries, prot_x), normal_entries.map(_.px).asUInt) & ~(ptw_ae_array | final_ae_array)
// put effect
val eff_array = Cat(Fill(nPhysicalEntries, prot_eff), normal_entries.map(_.eff).asUInt)
// cacheable
val c_array = Cat(Fill(nPhysicalEntries, cacheable), normal_entries.map(_.c).asUInt)
// put partial
val ppp_array = Cat(Fill(nPhysicalEntries, prot_pp), normal_entries.map(_.ppp).asUInt)
// atomic arithmetic
val paa_array = Cat(Fill(nPhysicalEntries, prot_aa), normal_entries.map(_.paa).asUInt)
// atomic logic
val pal_array = Cat(Fill(nPhysicalEntries, prot_al), normal_entries.map(_.pal).asUInt)
val ppp_array_if_cached = ppp_array | c_array
val paa_array_if_cached = paa_array | (if(usingAtomicsInCache) c_array else 0.U)
val pal_array_if_cached = pal_array | (if(usingAtomicsInCache) c_array else 0.U)
val prefetchable_array = Cat((cacheable && homogeneous) << (nPhysicalEntries-1), normal_entries.map(_.c).asUInt)
// vaddr is misaligned if any offset bit below the access size is set
val misaligned = (io.req.bits.vaddr & (UIntToOH(io.req.bits.size) - 1.U)).orR
def badVA(guestPA: Boolean): Bool = {
val additionalPgLevels = (if (guestPA) io.ptw.hgatp else satp).additionalPgLevels
val extraBits = if (guestPA) hypervisorExtraAddrBits else 0
val signed = !guestPA
val nPgLevelChoices = pgLevels - minPgLevels + 1
val minVAddrBits = pgIdxBits + minPgLevels * pgLevelBits + extraBits
(for (i <- 0 until nPgLevelChoices) yield {
val mask = ((BigInt(1) << vaddrBitsExtended) - (BigInt(1) << (minVAddrBits + i * pgLevelBits - signed.toInt))).U
val maskedVAddr = io.req.bits.vaddr & mask
additionalPgLevels === i.U && !(maskedVAddr === 0.U || signed.B && maskedVAddr === mask)
}).orR
}
val bad_gpa =
if (!usingHypervisor) false.B
else vm_enabled && !stage1_en && badVA(true)
val bad_va =
if (!usingVM || (minPgLevels == pgLevels && vaddrBits == vaddrBitsExtended)) false.B
else vm_enabled && stage1_en && badVA(false)
val cmd_lrsc = usingAtomics.B && io.req.bits.cmd.isOneOf(M_XLR, M_XSC)
val cmd_amo_logical = usingAtomics.B && isAMOLogical(io.req.bits.cmd)
val cmd_amo_arithmetic = usingAtomics.B && isAMOArithmetic(io.req.bits.cmd)
val cmd_put_partial = io.req.bits.cmd === M_PWR
val cmd_read = isRead(io.req.bits.cmd)
val cmd_readx = usingHypervisor.B && io.req.bits.cmd === M_HLVX
val cmd_write = isWrite(io.req.bits.cmd)
val cmd_write_perms = cmd_write ||
io.req.bits.cmd.isOneOf(M_FLUSH_ALL, M_WOK) // not a write, but needs write permissions
val lrscAllowed = Mux((usingDataScratchpad || usingAtomicsOnlyForIO).B, 0.U, c_array)
val ae_array =
Mux(misaligned, eff_array, 0.U) |
Mux(cmd_lrsc, ~lrscAllowed, 0.U)
// access exception needs SoC information from PMA
val ae_ld_array = Mux(cmd_read, ae_array | ~pr_array, 0.U)
val ae_st_array =
Mux(cmd_write_perms, ae_array | ~pw_array, 0.U) |
Mux(cmd_put_partial, ~ppp_array_if_cached, 0.U) |
Mux(cmd_amo_logical, ~pal_array_if_cached, 0.U) |
Mux(cmd_amo_arithmetic, ~paa_array_if_cached, 0.U)
val must_alloc_array =
Mux(cmd_put_partial, ~ppp_array, 0.U) |
Mux(cmd_amo_logical, ~pal_array, 0.U) |
Mux(cmd_amo_arithmetic, ~paa_array, 0.U) |
Mux(cmd_lrsc, ~0.U(pal_array.getWidth.W), 0.U)
val pf_ld_array = Mux(cmd_read, ((~Mux(cmd_readx, x_array, r_array) & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U)
val pf_st_array = Mux(cmd_write_perms, ((~w_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U)
val pf_inst_array = ((~x_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array
val gf_ld_array = Mux(priv_v && cmd_read, (~Mux(cmd_readx, hx_array, hr_array) | ptw_gf_array) & ~ptw_ae_array, 0.U)
val gf_st_array = Mux(priv_v && cmd_write_perms, (~hw_array | ptw_gf_array) & ~ptw_ae_array, 0.U)
val gf_inst_array = Mux(priv_v, (~hx_array | ptw_gf_array) & ~ptw_ae_array, 0.U)
val gpa_hits = {
val need_gpa_mask = if (instruction) gf_inst_array else gf_ld_array | gf_st_array
val hit_mask = Fill(ordinary_entries.size, r_gpa_valid && r_gpa_vpn === vpn) | Fill(all_entries.size, !vstage1_en)
hit_mask | ~need_gpa_mask(all_entries.size-1, 0)
}
val tlb_hit_if_not_gpa_miss = real_hits.orR
val tlb_hit = (real_hits & gpa_hits).orR
// leads to s_request
val tlb_miss = vm_enabled && !vsatp_mode_mismatch && !bad_va && !tlb_hit
val sectored_plru = new SetAssocLRU(cfg.nSets, sectored_entries.head.size, "plru")
val superpage_plru = new PseudoLRU(superpage_entries.size)
when (io.req.valid && vm_enabled) {
// replace
when (sector_hits.orR) { sectored_plru.access(memIdx, OHToUInt(sector_hits)) }
when (superpage_hits.orR) { superpage_plru.access(OHToUInt(superpage_hits)) }
}
// Superpages create the possibility that two entries in the TLB may match.
// This corresponds to a software bug, but we can't return complete garbage;
// we must return either the old translation or the new translation. This
// isn't compatible with the Mux1H approach. So, flush the TLB and report
// a miss on duplicate entries.
val multipleHits = PopCountAtLeast(real_hits, 2)
// only assert req.ready in the s_ready state.
io.req.ready := state === s_ready
// page fault
io.resp.pf.ld := (bad_va && cmd_read) || (pf_ld_array & hits).orR
io.resp.pf.st := (bad_va && cmd_write_perms) || (pf_st_array & hits).orR
io.resp.pf.inst := bad_va || (pf_inst_array & hits).orR
// guest page fault
io.resp.gf.ld := (bad_gpa && cmd_read) || (gf_ld_array & hits).orR
io.resp.gf.st := (bad_gpa && cmd_write_perms) || (gf_st_array & hits).orR
io.resp.gf.inst := bad_gpa || (gf_inst_array & hits).orR
// access exception
io.resp.ae.ld := (ae_ld_array & hits).orR
io.resp.ae.st := (ae_st_array & hits).orR
io.resp.ae.inst := (~px_array & hits).orR
// misaligned
io.resp.ma.ld := misaligned && cmd_read
io.resp.ma.st := misaligned && cmd_write
io.resp.ma.inst := false.B // this is up to the pipeline to figure out
io.resp.cacheable := (c_array & hits).orR
io.resp.must_alloc := (must_alloc_array & hits).orR
io.resp.prefetchable := (prefetchable_array & hits).orR && edge.manager.managers.forall(m => !m.supportsAcquireB || m.supportsHint).B
io.resp.miss := do_refill || vsatp_mode_mismatch || tlb_miss || multipleHits
io.resp.paddr := Cat(ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
io.resp.size := io.req.bits.size
io.resp.cmd := io.req.bits.cmd
io.resp.gpa_is_pte := vstage1_en && r_gpa_is_pte
io.resp.gpa := {
val page = Mux(!vstage1_en, Cat(bad_gpa, vpn), r_gpa >> pgIdxBits)
val offset = Mux(io.resp.gpa_is_pte, r_gpa(pgIdxBits-1, 0), io.req.bits.vaddr(pgIdxBits-1, 0))
Cat(page, offset)
}
io.ptw.req.valid := state === s_request
io.ptw.req.bits.valid := !io.kill
io.ptw.req.bits.bits.addr := r_refill_tag
io.ptw.req.bits.bits.vstage1 := r_vstage1_en
io.ptw.req.bits.bits.stage2 := r_stage2_en
io.ptw.req.bits.bits.need_gpa := r_need_gpa
if (usingVM) {
when(io.ptw.req.fire && io.ptw.req.bits.valid) {
r_gpa_valid := false.B
r_gpa_vpn := r_refill_tag
}
val sfence = io.sfence.valid
// this is [[s_ready]]
// handle miss/hit at the first cycle.
// if miss, request PTW(L2TLB).
when (io.req.fire && tlb_miss) {
state := s_request
r_refill_tag := vpn
r_need_gpa := tlb_hit_if_not_gpa_miss
r_vstage1_en := vstage1_en
r_stage2_en := stage2_en
r_superpage_repl_addr := replacementEntry(superpage_entries, superpage_plru.way)
r_sectored_repl_addr := replacementEntry(sectored_entries(memIdx), sectored_plru.way(memIdx))
r_sectored_hit.valid := sector_hits.orR
r_sectored_hit.bits := OHToUInt(sector_hits)
r_superpage_hit.valid := superpage_hits.orR
r_superpage_hit.bits := OHToUInt(superpage_hits)
}
// Handle SFENCE.VMA while a PTW request is outstanding (state === s_request).
// SFENCE.VMA   io.ptw.req.ready   kill   next state
//     ?               ?             1    s_ready
//     0               0             0    s_request (keep waiting)
//     0               1             0    s_wait
//     1               1             0    s_wait_invalidate
//     1               0             0    s_ready
when (state === s_request) {
// SFENCE.VMA will kill TLB entries based on rs1 and rs2. It will take 1 cycle.
when (sfence) { state := s_ready }
// strictly this should be io.ptw.req.fire, but io.ptw.req.valid is already asserted in this state, so ready alone suffices
// fire -> s_wait
when (io.ptw.req.ready) { state := Mux(sfence, s_wait_invalidate, s_wait) }
// If CPU kills request(frontend.s2_redirect)
when (io.kill) { state := s_ready }
}
// an sfence during refill results in invalidating the refilled entry
when (state === s_wait && sfence) {
state := s_wait_invalidate
}
// once the PTW response arrives, go back to s_ready.
when (io.ptw.resp.valid) {
state := s_ready
}
// SFENCE processing logic.
when (sfence) {
assert(!io.sfence.bits.rs1 || (io.sfence.bits.addr >> pgIdxBits) === vpn)
for (e <- all_real_entries) {
val hv = usingHypervisor.B && io.sfence.bits.hv
val hg = usingHypervisor.B && io.sfence.bits.hg
when (!hg && io.sfence.bits.rs1) { e.invalidateVPN(vpn, hv) }
.elsewhen (!hg && io.sfence.bits.rs2) { e.invalidateNonGlobal(hv) }
.otherwise { e.invalidate(hv || hg) }
}
}
when(io.req.fire && vsatp_mode_mismatch) {
all_real_entries.foreach(_.invalidate(true.B))
v_entries_use_stage1 := vstage1_en
}
when (multipleHits || reset.asBool) {
all_real_entries.foreach(_.invalidate())
}
ccover(io.ptw.req.fire, "MISS", "TLB miss")
ccover(io.ptw.req.valid && !io.ptw.req.ready, "PTW_STALL", "TLB miss, but PTW busy")
ccover(state === s_wait_invalidate, "SFENCE_DURING_REFILL", "flush TLB during TLB refill")
ccover(sfence && !io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_ALL", "flush TLB")
ccover(sfence && !io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_ASID", "flush TLB ASID")
ccover(sfence && io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_LINE", "flush TLB line")
ccover(sfence && io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_LINE_ASID", "flush TLB line/ASID")
ccover(multipleHits, "MULTIPLE_HITS", "Two matching translations in TLB")
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"${if (instruction) "I" else "D"}TLB_$label", "MemorySystem;;" + desc)
/** Decides which entry to replace
*
* If there is an invalid entry, pick it with a priority encoder;
* otherwise, replace the entry selected by the replacement policy (alt).
*
* @return index of the TLBEntry to replace
*/
def replacementEntry(set: Seq[TLBEntry], alt: UInt) = {
val valids = set.map(_.valid.orR).asUInt
Mux(valids.andR, alt, PriorityEncoder(~valids))
}
}
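// Hedged sketch (not part of the original file): instantiating this TLB inside a cache
// front end that already has a TileLink edge and core Parameters in implicit scope.
// All right-hand-side signal names below are illustrative assumptions.
//
//   val dtlb = Module(new TLB(instruction = false, lgMaxSize = 3,
//                             cfg = TLBConfig(nSets = 1, nWays = 32)))
//   dtlb.io.req.valid := s1_valid
//   dtlb.io.req.bits.vaddr := s1_vaddr
//   dtlb.io.req.bits.size := s1_size
//   dtlb.io.req.bits.cmd := s1_cmd
//   dtlb.io.req.bits.prv := s1_priv
//   dtlb.io.req.bits.v := s1_virtualized
//   dtlb.io.req.bits.passthrough := false.B
//   dtlb.io.sfence := io.cpu.sfence
//   dtlb.io.kill := s2_kill
//   io.ptw <> dtlb.io.ptw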
File TLBPermissions.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes, RegionType, AddressDecoder}
import freechips.rocketchip.tilelink.TLManagerParameters
case class TLBPermissions(
homogeneous: Bool, // if false, the below are undefined
r: Bool, // readable
w: Bool, // writeable
x: Bool, // executable
c: Bool, // cacheable
a: Bool, // arithmetic ops
l: Bool) // logical ops
object TLBPageLookup
{
private case class TLBFixedPermissions(
e: Boolean, // get-/put-effects
r: Boolean, // readable
w: Boolean, // writeable
x: Boolean, // executable
c: Boolean, // cacheable
a: Boolean, // arithmetic ops
l: Boolean) { // logical ops
val useful = r || w || x || c || a || l
}
private def groupRegions(managers: Seq[TLManagerParameters]): Map[TLBFixedPermissions, Seq[AddressSet]] = {
val permissions = managers.map { m =>
(m.address, TLBFixedPermissions(
e = Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains m.regionType,
r = m.supportsGet || m.supportsAcquireB, // if cached, never uses Get
w = m.supportsPutFull || m.supportsAcquireT, // if cached, never uses Put
x = m.executable,
c = m.supportsAcquireB,
a = m.supportsArithmetic,
l = m.supportsLogical))
}
permissions
.filter(_._2.useful) // get rid of no-permission devices
.groupBy(_._2) // group by permission type
.mapValues(seq =>
AddressSet.unify(seq.flatMap(_._1))) // coalesce same-permission regions
.toMap
}
// Unmapped memory is considered to be inhomogeneous
def apply(managers: Seq[TLManagerParameters], xLen: Int, cacheBlockBytes: Int, pageSize: BigInt, maxRequestBytes: Int): UInt => TLBPermissions = {
require (isPow2(xLen) && xLen >= 8)
require (isPow2(cacheBlockBytes) && cacheBlockBytes >= xLen/8)
require (isPow2(pageSize) && pageSize >= cacheBlockBytes)
val xferSizes = TransferSizes(cacheBlockBytes, cacheBlockBytes)
val allSizes = TransferSizes(1, maxRequestBytes)
val amoSizes = TransferSizes(4, xLen/8)
val permissions = managers.foreach { m =>
require (!m.supportsGet || m.supportsGet .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsGet} Get, but must support ${allSizes}")
require (!m.supportsPutFull || m.supportsPutFull .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutFull} PutFull, but must support ${allSizes}")
require (!m.supportsPutPartial || m.supportsPutPartial.contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutPartial} PutPartial, but must support ${allSizes}")
require (!m.supportsAcquireB || m.supportsAcquireB .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireB} AcquireB, but must support ${xferSizes}")
require (!m.supportsAcquireT || m.supportsAcquireT .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireT} AcquireT, but must support ${xferSizes}")
require (!m.supportsLogical || m.supportsLogical .contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsLogical} Logical, but must support ${amoSizes}")
require (!m.supportsArithmetic || m.supportsArithmetic.contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsArithmetic} Arithmetic, but must support ${amoSizes}")
require (!(m.supportsAcquireB && m.supportsPutFull && !m.supportsAcquireT), s"Memory region '${m.name}' supports AcquireB (cached read) and PutFull (un-cached write) but not AcquireT (cached write)")
}
val grouped = groupRegions(managers)
.mapValues(_.filter(_.alignment >= pageSize)) // discard any region that's not big enough
def lowCostProperty(prop: TLBFixedPermissions => Boolean): UInt => Bool = {
val (yesm, nom) = grouped.partition { case (k, eq) => prop(k) }
val (yes, no) = (yesm.values.flatten.toList, nom.values.flatten.toList)
// Find the minimal bits needed to distinguish between yes and no
val decisionMask = AddressDecoder(Seq(yes, no))
def simplify(x: Seq[AddressSet]) = AddressSet.unify(x.map(_.widen(~decisionMask)).distinct)
val (yesf, nof) = (simplify(yes), simplify(no))
if (yesf.size < no.size) {
(x: UInt) => yesf.map(_.contains(x)).foldLeft(false.B)(_ || _)
} else {
(x: UInt) => !nof.map(_.contains(x)).foldLeft(false.B)(_ || _)
}
}
// Derive simplified property circuits (don't care when !homo)
val rfn = lowCostProperty(_.r)
val wfn = lowCostProperty(_.w)
val xfn = lowCostProperty(_.x)
val cfn = lowCostProperty(_.c)
val afn = lowCostProperty(_.a)
val lfn = lowCostProperty(_.l)
val homo = AddressSet.unify(grouped.values.flatten.toList)
(x: UInt) => TLBPermissions(
homogeneous = homo.map(_.contains(x)).foldLeft(false.B)(_ || _),
r = rfn(x),
w = wfn(x),
x = xfn(x),
c = cfn(x),
a = afn(x),
l = lfn(x))
}
// Are all pageSize intervals of mapped regions homogeneous?
def homogeneous(managers: Seq[TLManagerParameters], pageSize: BigInt): Boolean = {
groupRegions(managers).values.forall(_.forall(_.alignment >= pageSize))
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
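// Hedged sketch (not part of the original file): carving one master's source-ID space
// into read and write halves; the concrete ranges and names are illustrative.
object IdRangeExample {
  val readIds = IdRange(0, 4)
  val writeIds = readIds.shift(4) // IdRange(4, 8)
  require(!(readIds overlaps writeIds))
  require(IdRange(0, 8) contains writeIds)
  // hardware membership test, usable inside a module body
  def sourceIsRead(source: UInt): Bool = readIds.contains(source)
}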
// A potentially empty inclusive range of powers of 2, [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
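// Hedged sketch (not part of the original file): combining transfer-size capabilities
// when two ports are merged; the concrete sizes are illustrative.
object TransferSizesExample {
  val a = TransferSizes(4, 64)
  val b = TransferSizes(1, 16)
  val common = a intersect b // TransferSizes(4, 16): sizes both sides support
  val cover = a mincover b   // TransferSizes(1, 64): smallest range covering both
  require(common.contains(8) && !common.contains(32))
}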
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
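// Hedged sketch (not part of the original file): base/mask address arithmetic.
// The regions below are illustrative, not taken from any particular SoC.
object AddressSetExample {
  val ram = AddressSet(0x80000000L, 0x3fffffffL)  // 1 GiB at 0x8000_0000
  val mmio = AddressSet(0x60000000L, 0x1fffffffL) // 512 MiB at 0x6000_0000
  require(!(ram overlaps mmio))
  require(ram.contains(BigInt(0x80001234L)))
  // unify pairs two adjacent 256-byte sets into a single AddressSet(0x0, 0x1ff)
  val unified = AddressSet.unify(Seq(AddressSet(0x000, 0x0ff), AddressSet(0x100, 0x0ff)))
}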
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
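// Hedged sketch (not part of the original file): applying BufferParams to a Decoupled
// channel. BufferParams.default inserts a two-entry queue, while BufferParams.none
// would pass the channel through untouched. The module name is illustrative.
class BufferParamsExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(DecoupledIO(UInt(8.W)))
    val out = DecoupledIO(UInt(8.W))
  })
  io.out <> BufferParams.default(io.in)
}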
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File PTW.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch}
import chisel3.withClock
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
import scala.collection.mutable.ListBuffer
/** PTE request from TLB to PTW
*
* The TLB sends a PTE request to the PTW on an L1TLB miss.
*/
class PTWReq(implicit p: Parameters) extends CoreBundle()(p) {
val addr = UInt(vpnBits.W)
val need_gpa = Bool()
val vstage1 = Bool()
val stage2 = Bool()
}
/** PTE info from L2TLB to TLB
*
* containing: the target PTE, exceptions, and two-stage translation info
*/
class PTWResp(implicit p: Parameters) extends CoreBundle()(p) {
/** ptw access exception */
val ae_ptw = Bool()
/** final access exception */
val ae_final = Bool()
/** page fault */
val pf = Bool()
/** guest page fault */
val gf = Bool()
/** hypervisor read */
val hr = Bool()
/** hypervisor write */
val hw = Bool()
/** hypervisor execute */
val hx = Bool()
/** PTE to refill L1TLB
*
* source: L2TLB
*/
val pte = new PTE
/** pte pglevel */
val level = UInt(log2Ceil(pgLevels).W)
/** fragmented_superpage support */
val fragmented_superpage = Bool()
/** homogeneous for both pma and pmp */
val homogeneous = Bool()
val gpa = Valid(UInt(vaddrBits.W))
val gpa_is_pte = Bool()
}
/** IO between TLB and PTW
*
* PTW receives :
* - PTE request
* - CSRs info
* - pmp results from PMP(in TLB)
*/
class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val req = Decoupled(Valid(new PTWReq))
val resp = Flipped(Valid(new PTWResp))
val ptbr = Input(new PTBR())
val hgatp = Input(new PTBR())
val vsatp = Input(new PTBR())
val status = Input(new MStatus())
val hstatus = Input(new HStatus())
val gstatus = Input(new MStatus())
val pmp = Input(Vec(nPMPs, new PMP))
val customCSRs = Flipped(coreParams.customCSRs)
}
/** PTW performance statistics */
class PTWPerfEvents extends Bundle {
val l2miss = Bool()
val l2hit = Bool()
val pte_miss = Bool()
val pte_hit = Bool()
}
/** Datapath IO between PTW and Core
*
* PTW receives CSR info, PMP check results, and sfence instruction info
*
* PTW sends its performance statistics to the core
*/
class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val ptbr = Input(new PTBR())
val hgatp = Input(new PTBR())
val vsatp = Input(new PTBR())
val sfence = Flipped(Valid(new SFenceReq))
val status = Input(new MStatus())
val hstatus = Input(new HStatus())
val gstatus = Input(new MStatus())
val pmp = Input(Vec(nPMPs, new PMP))
val perf = Output(new PTWPerfEvents())
val customCSRs = Flipped(coreParams.customCSRs)
/** enable clock generated by ptw */
val clock_enabled = Output(Bool())
}
/** PTE template for transmission
*
* contains useful methods to check PTE attributes
* @see RV-priv spec 4.3.1 for page table entry format
*/
class PTE(implicit p: Parameters) extends CoreBundle()(p) {
val reserved_for_future = UInt(10.W)
val ppn = UInt(44.W)
val reserved_for_software = Bits(2.W)
/** dirty bit */
val d = Bool()
/** access bit */
val a = Bool()
/** global mapping */
val g = Bool()
/** user mode accessible */
val u = Bool()
/** whether the page is executable */
val x = Bool()
/** whether the page is writable */
val w = Bool()
/** whether the page is readable */
val r = Bool()
/** valid bit */
val v = Bool()
/** return true if find a pointer to next level page table */
def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U
/** return true if find a leaf PTE */
def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a
/** user read */
def ur(dummy: Int = 0) = sr() && u
/** user write */
def uw(dummy: Int = 0) = sw() && u
/** user execute */
def ux(dummy: Int = 0) = sx() && u
/** supervisor read */
def sr(dummy: Int = 0) = leaf() && r
/** supervisor write */
def sw(dummy: Int = 0) = leaf() && w && d
/** supervisor execute */
def sx(dummy: Int = 0) = leaf() && x
/** full permission: writable and executable in user mode */
def isFullPerm(dummy: Int = 0) = uw() && ux()
}
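// Illustrative sketch of how a walker or TLB combines the helpers above (assuming `pte` holds a
// decoded PTE):
//   pte.table() // valid pointer to the next page-table level (no R/W/X/D/A/U bits set)
//   pte.leaf()  // valid leaf with A set and a legal R/W/X combination
//   pte.sw()    // supervisor store permitted: leaf, writable, and already dirty
//   pte.uw()    // the same check additionally gated by the U bit for user-mode stores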
/** L2TLB PTE template
*
* contains tag bits
* @param nSets number of sets in L2TLB
* @see RV-priv spec 4.3.1 for page table entry format
*/
class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val idxBits = log2Ceil(nSets)
val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0)
val tag = UInt(tagBits.W)
val ppn = UInt(ppnBits.W)
/** dirty bit */
val d = Bool()
/** access bit */
val a = Bool()
/** user mode accessible */
val u = Bool()
/** whether the page is executable */
val x = Bool()
/** whether the page is writable */
val w = Bool()
/** whether the page is readable */
val r = Bool()
}
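// Worked sizing example (illustrative parameters): for an RV64 Sv39 design with 512 L2 TLB
// entries in a single way, idxBits = log2Ceil(512) = 9 and, without the hypervisor extension,
// tagBits = maxSVAddrBits - pgIdxBits - idxBits = 39 - 12 - 9 = 18, so each entry holds an
// 18-bit tag alongside the PPN and permission bits before parity encoding.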
/** PTW contains the L2TLB; it performs page table walks and serves queries from the L1 TLBs (I$, D$, RoCC).
 *
 * It performs hierarchical page table queries to mem for the desired leaf PTE and caches the results in the l2tlb.
 * Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accelerate the walk.
*
* ==Structure==
* - l2tlb : for leaf PTEs
* - set-associative (configurable with [[CoreParams.nL2TLBEntries]] and [[CoreParams.nL2TLBWays]])
* - PLRU
* - pte_cache: for non-leaf PTEs
* - set-associative
* - LRU
* - s2_pte_cache: for non-leaf PTEs in 2-stage translation
* - set-associative
* - PLRU
*
* l2tlb Pipeline: 3 stages
* {{{
* stage 0 : read
* stage 1 : decode
* stage 2 : hit check
* }}}
* ==State Machine==
* s_ready: ready to receive a request from the TLB
* s_req: request mem; pte_cache hit judge
* s_wait1: deal with l2tlb error
* s_wait2: final hit judge
* s_wait3: receive mem response
* s_fragment_superpage: for superpage PTE
*
* @note l2tlb hit happens in s_req or s_wait1
* @see RV-priv spec 4.3-4.6 for Virtual-Memory System
* @see RV-priv spec 8.5 for Two-Stage Address Translation
* @todo details in two-stage translation
*/
class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) {
val io = IO(new Bundle {
/** to n TLB */
val requestor = Flipped(Vec(n, new TLBPTWIO))
/** to HellaCache */
val mem = new HellaCacheIO
/** to Core
*
* contains CSRs info and performance statistics
*/
val dpath = new DatapathPTWIO
})
val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8)
val state = RegInit(s_ready)
val l2_refill_wire = Wire(Bool())
/** Arbiter to arbitrate requests from the n TLBs */
val arb = Module(new Arbiter(Valid(new PTWReq), n))
// use TLB req as the arbiter's input
arb.io.in <> io.requestor.map(_.req)
// receive req only when s_ready and not in refill
arb.io.out.ready := (state === s_ready) && !l2_refill_wire
val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B)))
val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || io.dpath.customCSRs.disableDCacheClockGate
io.dpath.clock_enabled := usingVM.B && clock_en
val gated_clock =
if (!usingVM || !tileParams.dcache.get.clockGate) clock
else ClockGate(clock, clock_en, "ptw_clock_gate")
withClock (gated_clock) { // entering gated-clock domain
val invalidated = Reg(Bool())
/** current PTE level
* {{{
* 0 <= count <= pgLevel-1
* count = pgLevel - 1 : leaf PTE
* count < pgLevel - 1 : non-leaf PTE
* }}}
*/
val count = Reg(UInt(log2Ceil(pgLevels).W))
val resp_ae_ptw = Reg(Bool())
val resp_ae_final = Reg(Bool())
val resp_pf = Reg(Bool())
val resp_gf = Reg(Bool())
val resp_hr = Reg(Bool())
val resp_hw = Reg(Bool())
val resp_hx = Reg(Bool())
val resp_fragmented_superpage = Reg(Bool())
/** tlb request */
val r_req = Reg(new PTWReq)
/** currently selected requestor port in the arbiter */
val r_req_dest = Reg(Bits())
// holds the PTE being worked on: used to respond to the L1TLB (e.g. on an l2_hit)
// and to construct mem.req.addr for the next level of the walk
val r_pte = Reg(new PTE)
val r_hgatp = Reg(new PTBR)
// 2-stage pageLevel
val aux_count = Reg(UInt(log2Ceil(pgLevels).W))
/** pte for 2-stage translation */
val aux_pte = Reg(new PTE)
val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case
val stage2 = Reg(Bool())
val stage2_final = Reg(Bool())
val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr)
val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels
/** both stages of 2-stage translation are enabled */
val do_both_stages = r_req.vstage1 && r_req.stage2
val max_count = count max aux_count
val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr)
val mem_resp_valid = RegNext(io.mem.resp.valid)
val mem_resp_data = RegNext(io.mem.resp.bits.data)
io.mem.uncached_resp.map { resp =>
assert(!(resp.valid && io.mem.resp.valid))
resp.ready := true.B
when (resp.valid) {
mem_resp_valid := true.B
mem_resp_data := resp.bits.data
}
}
// construct pte from mem.resp
val (pte, invalid_paddr, invalid_gpa) = {
val tmp = mem_resp_data.asTypeOf(new PTE())
val res = WireDefault(tmp)
res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0))
when (tmp.r || tmp.w || tmp.x) {
// for superpage mappings, make sure PPN LSBs are zero
for (i <- 0 until pgLevels-1)
when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B }
}
(res,
Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U),
do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn))
}
// a valid non-leaf PTE was found, so the walk must traverse to the next level
val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U
/** address sent to mem for the page-table query */
val pte_addr = if (!usingVM) 0.U else {
val vpn_idxs = (0 until pgLevels).map { i =>
val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0)
(vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0)
}
val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U)
val vpn_idx = vpn_idxs(count) & mask
val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8)
val size = if (usingHypervisor) vaddrBits else paddrBits
// use r_pte.ppn as the page table base address
// use the vpn slice as the offset
raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0)
}
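// Worked example (illustrative, Sv39 without the hypervisor extension): with pgLevelBits = 9 and
// xLen = 64 the final shift is log2Ceil(64/8) = 3, so
//   pte_addr = ((r_pte.ppn << 9) | vpn_idx) << 3 = r_pte.ppn * 4096 + vpn_idx * 8
// i.e. the vpn_idx-th 8-byte PTE within the 4 KiB page-table page based at r_pte.ppn.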
/** stage2_pte_cache input addr */
val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else {
val vpn_idxs = (0 until pgLevels - 1).map { i =>
(r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0)
}
val vpn_idx = vpn_idxs(aux_count)
val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8)
raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0)
}
def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = {
(pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i)))
}
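// Illustrative example: for an Sv39 level-0 (gigapage) leaf, element 0 of the returned sequence
// keeps the upper PPN bits and substitutes the low 2 * pgLevelBits = 18 bits with the request's
// VPN[1:0] fields, i.e. the 4 KiB frame inside the superpage that the request actually touches.
// s_fragment_superpage hands this back when the superpage is not PMA/PMP homogeneous.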
/** PTECache caches non-leaf PTE
* @param s2 true: 2-stage address translation
*/
def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) {
(false.B, 0.U)
} else {
val plru = new PseudoLRU(coreParams.nPTECacheEntries)
val valid = RegInit(0.U(coreParams.nPTECacheEntries.W))
val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W)))
// does not store the full PTE, only the PPN
val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W)))
val can_hit =
if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final
else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2)
val can_refill =
if (s2) do_both_stages && !stage2 && !stage2_final
else can_hit
val tag =
if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits))
else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits))
val hits = tags.map(_ === tag).asUInt & valid
val hit = hits.orR && can_hit
// refill with mem response
when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) {
val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid))
valid := valid | UIntToOH(r)
tags(r) := tag
data(r) := pte.ppn
plru.access(r)
}
// update the replacement state on a hit
when (hit && state === s_req) { plru.access(OHToUInt(hits)) }
when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U }
val lcount = if (s2) aux_count else count
for (i <- 0 until pgLevels-1) {
ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i")
}
(hit, Mux1H(hits, data))
}
// generate pte_cache
val (pte_cache_hit, pte_cache_data) = makePTECache(false)
// generate pte_cache with 2-stage translation
val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true)
// pte_cache hit or 2-stage pte_cache hit
val pte_hit = RegNext(false.B)
io.dpath.perf.pte_miss := false.B
io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit
assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)),
"PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event")
// l2_refill happens when a leaf PTE is found
val l2_refill = RegNext(false.B)
l2_refill_wire := l2_refill
io.dpath.perf.l2miss := false.B
io.dpath.perf.l2hit := false.B
// l2tlb
val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else {
val code = new ParityCode
require(isPow2(coreParams.nL2TLBEntries))
require(isPow2(coreParams.nL2TLBWays))
require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays)
val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays
require(isPow2(nL2TLBSets))
val idxBits = log2Ceil(nL2TLBSets)
val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru")
val ram = DescribedSRAM(
name = "l2_tlb_ram",
desc = "L2 TLB",
size = nL2TLBSets,
data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W))
)
val g = Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W)))
val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W))))
// use r_req to construct tag
val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits)
/** the valid vec for the selected set(including n ways) */
val r_valid_vec = valid.map(_(r_idx)).asUInt
val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W))
val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W))
r_valid_vec_q := r_valid_vec
// replacement way
r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U)
// refill with r_pte (the leaf PTE)
when (l2_refill && !invalidated) {
val entry = Wire(new L2TLBEntry(nL2TLBSets))
entry.ppn := r_pte.ppn
entry.d := r_pte.d
entry.a := r_pte.a
entry.u := r_pte.u
entry.x := r_pte.x
entry.w := r_pte.w
entry.r := r_pte.r
entry.tag := r_tag
// if all the ways are valid, use the PLRU to select the way to replace,
// otherwise use PriorityEncoderOH to pick an invalid way
val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W)
ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools)
val mask = UIntToOH(r_idx)
for (way <- 0 until coreParams.nL2TLBWays) {
when (wmask(way)) {
valid(way) := valid(way) | mask
g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask)
}
}
}
// sfence happens
when (io.dpath.sfence.valid) {
val hg = usingHypervisor.B && io.dpath.sfence.bits.hg
for (way <- 0 until coreParams.nL2TLBWays) {
valid(way) :=
Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)),
Mux(!hg && io.dpath.sfence.bits.rs2, valid(way) & g(way),
0.U))
}
}
val s0_valid = !l2_refill && arb.io.out.fire
val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa
val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid)
val s2_valid = RegNext(s1_valid)
// read from tlb idx
val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid)
val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid)))
val s2_valid_vec = RegEnable(r_valid_vec, s1_valid)
val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid)
val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR
when (s2_valid && s2_error) { valid.foreach { _ := 0.U }}
// decode
val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets)))
val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag))
val s2_hit = s2_valid && s2_hit_vec.orR
io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR)
io.dpath.perf.l2hit := s2_hit
when (s2_hit) {
l2_plru.access(r_idx, OHToUInt(s2_hit_vec))
assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit")
}
val s2_pte = Wire(new PTE)
val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec)
s2_pte.ppn := s2_hit_entry.ppn
s2_pte.d := s2_hit_entry.d
s2_pte.a := s2_hit_entry.a
s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec)
s2_pte.u := s2_hit_entry.u
s2_pte.x := s2_hit_entry.x
s2_pte.w := s2_hit_entry.w
s2_pte.r := s2_hit_entry.r
s2_pte.v := true.B
s2_pte.reserved_for_future := 0.U
s2_pte.reserved_for_software := 0.U
for (way <- 0 until coreParams.nL2TLBWays) {
ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way")
}
(s2_hit, s2_error, s2_pte, Some(ram))
}
// if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk
invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready)
// mem request
io.mem.keep_clock_enabled := false.B
io.mem.req.valid := state === s_req || state === s_dummy1
io.mem.req.bits.phys := true.B
io.mem.req.bits.cmd := M_XRD
io.mem.req.bits.size := log2Ceil(xLen/8).U
io.mem.req.bits.signed := false.B
io.mem.req.bits.addr := pte_addr
io.mem.req.bits.idx.foreach(_ := pte_addr)
io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition
io.mem.req.bits.dv := do_both_stages && !stage2
io.mem.req.bits.tag := DontCare
io.mem.req.bits.no_resp := false.B
io.mem.req.bits.no_alloc := DontCare
io.mem.req.bits.no_xcpt := DontCare
io.mem.req.bits.data := DontCare
io.mem.req.bits.mask := DontCare
io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf
io.mem.s1_data := DontCare
io.mem.s2_kill := false.B
val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits)
require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}")
val pmaPgLevelHomogeneous = (0 until pgLevels) map { i =>
val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits))
if (pageGranularityPMPs && i == pgLevels - 1) {
require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned")
true.B
} else {
TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous
}
}
val pmaHomogeneous = pmaPgLevelHomogeneous(count)
val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count)
val homogeneous = pmaHomogeneous && pmpHomogeneous
// response to tlb
for (i <- 0 until io.requestor.size) {
io.requestor(i).resp.valid := resp_valid(i)
io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw
io.requestor(i).resp.bits.ae_final := resp_ae_final
io.requestor(i).resp.bits.pf := resp_pf
io.requestor(i).resp.bits.gf := resp_gf
io.requestor(i).resp.bits.hr := resp_hr
io.requestor(i).resp.bits.hw := resp_hw
io.requestor(i).resp.bits.hx := resp_hx
io.requestor(i).resp.bits.pte := r_pte
io.requestor(i).resp.bits.level := max_count
io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B
io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B
io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa
io.requestor(i).resp.bits.gpa.bits :=
Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff)
io.requestor(i).resp.bits.gpa_is_pte := !stage2_final
io.requestor(i).ptbr := io.dpath.ptbr
io.requestor(i).hgatp := io.dpath.hgatp
io.requestor(i).vsatp := io.dpath.vsatp
io.requestor(i).customCSRs <> io.dpath.customCSRs
io.requestor(i).status := io.dpath.status
io.requestor(i).hstatus := io.dpath.hstatus
io.requestor(i).gstatus := io.dpath.gstatus
io.requestor(i).pmp := io.dpath.pmp
}
// control state machine
val next_state = WireDefault(state)
state := OptimizationBarrier(next_state)
val do_switch = WireDefault(false.B)
switch (state) {
is (s_ready) {
when (arb.io.out.fire) {
val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels
val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels
val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels
val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, arb.io.out.bits.bits.addr)
r_req := arb.io.out.bits.bits
r_req_dest := arb.io.chosen
next_state := Mux(arb.io.out.bits.valid, s_req, s_ready)
stage2 := arb.io.out.bits.bits.stage2
stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1
count := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count)
aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U)
aux_pte.ppn := aux_ppn
aux_pte.reserved_for_future := 0.U
resp_ae_ptw := false.B
resp_ae_final := false.B
resp_pf := false.B
resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2
resp_hr := true.B
resp_hw := true.B
resp_hx := true.B
resp_fragmented_superpage := false.B
r_hgatp := io.dpath.hgatp
assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2)
}
}
is (s_req) {
when(stage2 && count === r_hgatp_initial_count) {
gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr)
}
// pte_cache hit
when (stage2_pte_cache_hit) {
aux_count := aux_count + 1.U
aux_pte.ppn := stage2_pte_cache_data
aux_pte.reserved_for_future := 0.U
pte_hit := true.B
}.elsewhen (pte_cache_hit) {
count := count + 1.U
pte_hit := true.B
}.otherwise {
next_state := Mux(io.mem.req.ready, s_wait1, s_req)
}
when(resp_gf) {
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
}
is (s_wait1) {
// This Mux is for the l2_error case; the l2_hit && !l2_error case is overridden below
next_state := Mux(l2_hit, s_req, s_wait2)
}
is (s_wait2) {
next_state := s_wait3
io.dpath.perf.pte_miss := count < (pgLevels-1).U
when (io.mem.s2_xcpt.ae.ld) {
resp_ae_ptw := true.B
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
}
is (s_fragment_superpage) {
next_state := s_ready
resp_valid(r_req_dest) := true.B
when (!homogeneous) {
count := (pgLevels-1).U
resp_fragmented_superpage := true.B
}
when (do_both_stages) {
resp_fragmented_superpage := true.B
}
}
}
val merged_pte = {
val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U)
val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U))
val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn
val stage1_ppn = stage1_ppns(count)
makePTE(stage1_ppn & superpage_mask, aux_pte)
}
r_pte := OptimizationBarrier(
// l2tlb hit -> a leaf PTE (l2_pte) was found; respond to the L1TLB
Mux(l2_hit && !l2_error && !resp_gf, l2_pte,
// S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp
Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte),
// pte_cache hit -> a non-leaf PTE was found; continue to request mem
Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte),
// 2-stage translation
Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte),
// when mem responds, store the PTE from mem.resp
Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte),
// fragment_superpage
Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte),
// when a TLB request arrives -> request mem using the root address from satp (or vsatp/hgatp)
Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)),
r_pte))))))))
when (l2_hit && !l2_error && !resp_gf) {
assert(state === s_req || state === s_wait1)
next_state := s_ready
resp_valid(r_req_dest) := true.B
count := (pgLevels-1).U
}
when (mem_resp_valid) {
assert(state === s_wait3)
next_state := s_req
when (traverse) {
when (do_both_stages && !stage2) { do_switch := true.B }
count := count + 1.U
}.otherwise {
val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa)
val ae = pte.v && invalid_paddr
val pf = pte.v && pte.reserved_for_future =/= 0.U
val success = pte.v && !ae && !pf && !gf
when (do_both_stages && !stage2_final && success) {
when (stage2) {
stage2 := false.B
count := aux_count
}.otherwise {
stage2_final := true.B
do_switch := true.B
}
}.otherwise {
// a leaf PTE was found; start the L2 refill
l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa &&
(!r_req.vstage1 && !r_req.stage2 ||
do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm())
count := max_count
when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) {
next_state := s_fragment_superpage
}.otherwise {
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table()
resp_ae_final := ae && pte.leaf()
resp_pf := pf && !stage2
resp_gf := gf || (pf && stage2)
resp_hr := !stage2 || (!pf && !gf && pte.ur())
resp_hw := !stage2 || (!pf && !gf && pte.uw())
resp_hx := !stage2 || (!pf && !gf && pte.ux())
}
}
}
when (io.mem.s2_nack) {
assert(state === s_wait2)
next_state := s_req
}
when (do_switch) {
aux_count := Mux(traverse, count + 1.U, count)
count := r_hgatp_initial_count
aux_pte := Mux(traverse, pte, {
val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn
makePTE(s1_ppns(count), pte)
})
stage2 := true.B
}
for (i <- 0 until pgLevels) {
val leaf = mem_resp_valid && !traverse && count === i.U
ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i")
ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i")
ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i")
ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i")
ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i")
if (i != pgLevels-1)
ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i")
}
ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep")
ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access")
ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table")
} // leaving gated-clock domain
private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc)
/** Replace PTE.ppn with ppn */
private def makePTE(ppn: UInt, default: PTE) = {
val pte = WireDefault(default)
pte.ppn := ppn
pte
}
/** use hgatp and vpn to construct a new ppn */
private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = {
val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits))
val lsbs = WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count))
val pte = WireDefault(default)
pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs)
pte
}
/** use hgatp and vpn to check for gpa out of range */
private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = {
val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits))
idxs.extract(count) =/= 0.U
}
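// Illustrative reading (Sv39x4: pgLevels = minPgLevels = 3, additionalPgLevels = 0): count is 0,
// so the check reduces to (vpn >> (3 * pgLevelBits + maxHypervisorExtraAddrBits)) =/= 0, flagging
// any guest-physical page number wider than the hypervisor-widened root VPN as invalid.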
}
/** Mix-ins for constructing tiles that might have a PTW */
trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile =>
val module: CanHavePTWModule
var nPTWPorts = 1
nDCachePorts += usingPTW.toInt
}
trait CanHavePTWModule extends HasHellaCacheModule {
val outer: CanHavePTW
val ptwPorts = ListBuffer(outer.dcache.module.io.ptw)
val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p))
ptw.io.mem <> DontCare
if (outer.usingPTW) {
dcachePorts += ptw.io.mem
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a set of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a set of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a set of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a set of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a set of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a set of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Star resolution is delegated to the node subclass, which implements the algorithm in resolveStar.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: unconnected forwarded diplomatic signals are DontCare'd here for compatibility;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: unconnected forwarded diplomatic signals are DontCare'd here for compatibility;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File DCache.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.amba.AMBAProt
import freechips.rocketchip.diplomacy.{BufferParams}
import freechips.rocketchip.prci.{ClockCrossingType, RationalCrossing, SynchronousCrossing, AsynchronousCrossing, CreditedCrossing}
import freechips.rocketchip.tile.{CoreBundle, LookupByHartId}
import freechips.rocketchip.tilelink.{TLFIFOFixer,ClientMetadata, TLBundleA, TLAtomics, TLBundleB, TLPermissions}
import freechips.rocketchip.tilelink.TLMessages.{AccessAck, HintAck, AccessAckData, Grant, GrantData, ReleaseAck}
import freechips.rocketchip.util.{CanHaveErrors, ClockGate, IdentityCode, ReplacementPolicy, DescribedSRAM, property}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
import freechips.rocketchip.util.UIntToAugmentedUInt
import freechips.rocketchip.util.UIntIsOneOf
import freechips.rocketchip.util.IntToAugmentedInt
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.SeqBoolBitwiseOps
// TODO: delete this trait once deduplication is smart enough to avoid globally inlining matching circuits
trait InlineInstance { self: chisel3.experimental.BaseModule =>
chisel3.experimental.annotate(
new chisel3.experimental.ChiselAnnotation {
def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(self.toNamed) } )
}
class DCacheErrors(implicit p: Parameters) extends L1HellaCacheBundle()(p)
with CanHaveErrors {
val correctable = (cacheParams.tagCode.canCorrect || cacheParams.dataCode.canCorrect).option(Valid(UInt(paddrBits.W)))
val uncorrectable = (cacheParams.tagCode.canDetect || cacheParams.dataCode.canDetect).option(Valid(UInt(paddrBits.W)))
val bus = Valid(UInt(paddrBits.W))
}
class DCacheDataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val addr = UInt(untagBits.W)
val write = Bool()
val wdata = UInt((encBits * rowBytes / eccBytes).W)
val wordMask = UInt((rowBytes / subWordBytes).W)
val eccMask = UInt((wordBytes / eccBytes).W)
val way_en = UInt(nWays.W)
}
class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Valid(new DCacheDataReq))
val resp = Output(Vec(nWays, UInt((req.bits.wdata.getWidth).W)))
})
require(rowBits % subWordBits == 0, "rowBits must be a multiple of subWordBits")
val eccMask = if (eccBits == subWordBits) Seq(true.B) else io.req.bits.eccMask.asBools
val wMask = if (nWays == 1) eccMask else (0 until nWays).flatMap(i => eccMask.map(_ && io.req.bits.way_en(i)))
val wWords = io.req.bits.wdata.grouped(encBits * (subWordBits / eccBits))
val addr = io.req.bits.addr >> rowOffBits
val data_arrays = Seq.tabulate(rowBits / subWordBits) {
i =>
DescribedSRAM(
name = s"${tileParams.baseName}_dcache_data_arrays_${i}",
desc = "DCache Data Array",
size = nSets * cacheBlockBytes / rowBytes,
data = Vec(nWays * (subWordBits / eccBits), UInt(encBits.W))
)
}
val rdata = for ((array , i) <- data_arrays.zipWithIndex) yield {
val valid = io.req.valid && ((data_arrays.size == 1).B || io.req.bits.wordMask(i))
when (valid && io.req.bits.write) {
val wMaskSlice = (0 until wMask.size).filter(j => i % (wordBits/subWordBits) == (j % (wordBytes/eccBytes)) / (subWordBytes/eccBytes)).map(wMask(_))
val wData = wWords(i).grouped(encBits)
array.write(addr, VecInit((0 until nWays).flatMap(i => wData)), wMaskSlice)
}
val data = array.read(addr, valid && !io.req.bits.write)
data.grouped(subWordBits / eccBits).map(_.asUInt).toSeq
}
(io.resp zip rdata.transpose).foreach { case (resp, data) => resp := data.asUInt }
}
class DCacheMetadataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val write = Bool()
val addr = UInt(vaddrBitsExtended.W)
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val data = UInt(cacheParams.tagCode.width(new L1Metadata().getWidth).W)
}
class DCache(staticIdForMetadataUseOnly: Int, val crossing: ClockCrossingType)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) {
override lazy val module = new DCacheModule(this)
}
class DCacheTLBPort(implicit p: Parameters) extends CoreBundle()(p) {
val req = Flipped(Decoupled(new TLBReq(coreDataBytes.log2)))
val s1_resp = Output(new TLBResp(coreDataBytes.log2))
val s2_kill = Input(Bool())
}
class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val tECC = cacheParams.tagCode
val dECC = cacheParams.dataCode
require(subWordBits % eccBits == 0, "subWordBits must be a multiple of eccBits")
require(eccBytes == 1 || !dECC.isInstanceOf[IdentityCode])
require(cacheParams.silentDrop || cacheParams.acquireBeforeRelease, "!silentDrop requires acquireBeforeRelease")
val usingRMW = eccBytes > 1 || usingAtomicsInCache
val mmioOffset = outer.firstMMIO
edge.manager.requireFifo(TLFIFOFixer.allVolatile) // TileLink pipelining MMIO requests
val clock_en_reg = Reg(Bool())
io.cpu.clock_enabled := clock_en_reg
val gated_clock =
if (!cacheParams.clockGate) clock
else ClockGate(clock, clock_en_reg, "dcache_clock_gate")
class DCacheModuleImpl { // entering gated-clock domain
val tlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)))
val pma_checker = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)) with InlineInstance)
// tags
val replacer = ReplacementPolicy.fromString(cacheParams.replacementPolicy, nWays)
/** Metadata Arbiter:
* 0: Tag update on reset
* 1: Tag update on ECC error
* 2: Tag update on hit
* 3: Tag update on refill
* 4: Tag update on release
* 5: Tag update on flush
* 6: Tag update on probe
* 7: Tag update on CPU request
*/
val metaArb = Module(new Arbiter(new DCacheMetadataReq, 8) with InlineInstance)
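// Chisel's Arbiter grants the lowest-index valid port first, so reset (0) and ECC
// repair (1) take precedence over the refill/release/flush/probe ports, and CPU
// requests (7) have the lowest priority.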
val tag_array = DescribedSRAM(
name = s"${tileParams.baseName}_dcache_tag_array",
desc = "DCache Tag Array",
size = nSets,
data = Vec(nWays, chiselTypeOf(metaArb.io.out.bits.data))
)
// data
val data = Module(new DCacheDataArray)
/** Data Arbiter
* 0: data from pending store buffer
* 1: data from TL-D refill
* 2: release to TL-A
* 3: hit path to CPU
*/
val dataArb = Module(new Arbiter(new DCacheDataReq, 4) with InlineInstance)
dataArb.io.in.tail.foreach(_.bits.wdata := dataArb.io.in.head.bits.wdata) // tie off write ports by default
data.io.req.bits <> dataArb.io.out.bits
data.io.req.valid := dataArb.io.out.valid
dataArb.io.out.ready := true.B
metaArb.io.out.ready := clock_en_reg
val tl_out_a = Wire(chiselTypeOf(tl_out.a))
tl_out.a <> {
val a_queue_depth = outer.crossing match {
case RationalCrossing(_) => // TODO make this depend on the actual ratio?
if (cacheParams.separateUncachedResp) (maxUncachedInFlight + 1) / 2
else 2 min maxUncachedInFlight-1
case SynchronousCrossing(BufferParams.none) => 1 // Need some buffering to guarantee livelock freedom
case SynchronousCrossing(_) => 0 // Adequate buffering within the crossing
case _: AsynchronousCrossing => 0 // Adequate buffering within the crossing
case _: CreditedCrossing => 0 // Adequate buffering within the crossing
}
Queue(tl_out_a, a_queue_depth, flow = true)
}
val (tl_out_c, release_queue_empty) =
if (cacheParams.acquireBeforeRelease) {
val q = Module(new Queue(chiselTypeOf(tl_out.c.bits), cacheDataBeats, flow = true))
tl_out.c <> q.io.deq
(q.io.enq, q.io.count === 0.U)
} else {
(tl_out.c, true.B)
}
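// With acquireBeforeRelease, outgoing Release beats are staged in a block-sized,
// flow-through queue so the refill Acquire can be issued before the victim's
// writeback fully drains; release_queue_empty gates when that reordering is allowed.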
val s1_valid = RegNext(io.cpu.req.fire, false.B)
val s1_probe = RegNext(tl_out.b.fire, false.B)
val probe_bits = RegEnable(tl_out.b.bits, tl_out.b.fire) // TODO has data now :(
val s1_nack = WireDefault(false.B)
val s1_valid_masked = s1_valid && !io.cpu.s1_kill
val s1_valid_not_nacked = s1_valid && !s1_nack
val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B)
val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B)
val s0_clk_en = metaArb.io.out.valid && !metaArb.io.out.bits.write
val s0_req = WireInit(io.cpu.req.bits)
s0_req.addr := Cat(metaArb.io.out.bits.addr >> blockOffBits, io.cpu.req.bits.addr(blockOffBits-1,0))
s0_req.idx.foreach(_ := Cat(metaArb.io.out.bits.idx, s0_req.addr(blockOffBits-1, 0)))
when (!metaArb.io.in(7).ready) { s0_req.phys := true.B }
val s1_req = RegEnable(s0_req, s0_clk_en)
val s1_vaddr = Cat(s1_req.idx.getOrElse(s1_req.addr) >> tagLSB, s1_req.addr(tagLSB-1, 0))
val s0_tlb_req = WireInit(io.tlb_port.req.bits)
when (!io.tlb_port.req.fire) {
s0_tlb_req.passthrough := s0_req.phys
s0_tlb_req.vaddr := s0_req.addr
s0_tlb_req.size := s0_req.size
s0_tlb_req.cmd := s0_req.cmd
s0_tlb_req.prv := s0_req.dprv
s0_tlb_req.v := s0_req.dv
}
val s1_tlb_req = RegEnable(s0_tlb_req, s0_clk_en || io.tlb_port.req.valid)
val s1_read = isRead(s1_req.cmd)
val s1_write = isWrite(s1_req.cmd)
val s1_readwrite = s1_read || s1_write
val s1_sfence = s1_req.cmd === M_SFENCE || s1_req.cmd === M_HFENCEV || s1_req.cmd === M_HFENCEG
val s1_flush_line = s1_req.cmd === M_FLUSH_ALL && s1_req.size(0)
val s1_flush_valid = Reg(Bool())
val s1_waw_hazard = Wire(Bool())
val s_ready :: s_voluntary_writeback :: s_probe_rep_dirty :: s_probe_rep_clean :: s_probe_retry :: s_probe_rep_miss :: s_voluntary_write_meta :: s_probe_write_meta :: s_dummy :: s_voluntary_release :: Nil = Enum(10)
val supports_flush = outer.flushOnFenceI || coreParams.haveCFlush
val flushed = RegInit(true.B)
val flushing = RegInit(false.B)
val flushing_req = Reg(chiselTypeOf(s1_req))
val cached_grant_wait = RegInit(false.B)
val resetting = RegInit(false.B)
val flushCounter = RegInit((nSets * (nWays-1)).U(log2Ceil(nSets * nWays).W))
val release_ack_wait = RegInit(false.B)
val release_ack_addr = Reg(UInt(paddrBits.W))
val release_state = RegInit(s_ready)
val refill_way = Reg(UInt())
val any_pstore_valid = Wire(Bool())
val inWriteback = release_state.isOneOf(s_voluntary_writeback, s_probe_rep_dirty)
val releaseWay = Wire(UInt())
io.cpu.req.ready := (release_state === s_ready) && !cached_grant_wait && !s1_nack
// I/O MSHRs
val uncachedInFlight = RegInit(VecInit(Seq.fill(maxUncachedInFlight)(false.B)))
val uncachedReqs = Reg(Vec(maxUncachedInFlight, new HellaCacheReq))
val uncachedResp = WireInit(new HellaCacheReq, DontCare)
// hit initiation path
val s0_read = isRead(io.cpu.req.bits.cmd)
dataArb.io.in(3).valid := io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)
dataArb.io.in(3).bits := dataArb.io.in(1).bits
dataArb.io.in(3).bits.write := false.B
dataArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.idx.getOrElse(io.cpu.req.bits.addr) >> tagLSB, io.cpu.req.bits.addr(tagLSB-1, 0))
dataArb.io.in(3).bits.wordMask := {
val mask = (subWordBytes.log2 until rowOffBits).foldLeft(1.U) { case (in, i) =>
val upper_mask = Mux((i >= wordBytes.log2).B || io.cpu.req.bits.size <= i.U, 0.U,
((BigInt(1) << (1 << (i - subWordBytes.log2)))-1).U)
val upper = Mux(io.cpu.req.bits.addr(i), in, 0.U) | upper_mask
val lower = Mux(io.cpu.req.bits.addr(i), 0.U, in)
upper ## lower
}
Fill(subWordBytes / eccBytes, mask)
}
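// The computed wordMask marks which subword columns of the row this access may touch,
// so DCacheDataArray only enables those banks for the read.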
dataArb.io.in(3).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
dataArb.io.in(3).bits.way_en := ~0.U(nWays.W)
when (!dataArb.io.in(3).ready && s0_read) { io.cpu.req.ready := false.B }
val s1_did_read = RegEnable(dataArb.io.in(3).ready && (io.cpu.req.valid && needsRead(io.cpu.req.bits)), s0_clk_en)
val s1_read_mask = RegEnable(dataArb.io.in(3).bits.wordMask, s0_clk_en)
metaArb.io.in(7).valid := io.cpu.req.valid
metaArb.io.in(7).bits.write := false.B
metaArb.io.in(7).bits.idx := dataArb.io.in(3).bits.addr(idxMSB, idxLSB)
metaArb.io.in(7).bits.addr := io.cpu.req.bits.addr
metaArb.io.in(7).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(7).bits.data := metaArb.io.in(4).bits.data
when (!metaArb.io.in(7).ready) { io.cpu.req.ready := false.B }
// address translation
val s1_cmd_uses_tlb = s1_readwrite || s1_flush_line || s1_req.cmd === M_WOK
io.ptw <> tlb.io.ptw
tlb.io.kill := io.cpu.s2_kill || s2_tlb_req_valid && io.tlb_port.s2_kill
tlb.io.req.valid := s1_tlb_req_valid || s1_valid && !io.cpu.s1_kill && s1_cmd_uses_tlb
tlb.io.req.bits := s1_tlb_req
when (!tlb.io.req.ready && !tlb.io.ptw.resp.valid && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B }
when (!s1_tlb_req_valid && s1_valid && s1_cmd_uses_tlb && tlb.io.resp.miss) { s1_nack := true.B }
tlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence
tlb.io.sfence.bits.rs1 := s1_req.size(0)
tlb.io.sfence.bits.rs2 := s1_req.size(1)
tlb.io.sfence.bits.asid := io.cpu.s1_data.data
tlb.io.sfence.bits.addr := s1_req.addr
tlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV
tlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG
io.tlb_port.req.ready := clock_en_reg
io.tlb_port.s1_resp := tlb.io.resp
when (s1_tlb_req_valid && s1_valid && !(s1_req.phys && s1_req.no_xcpt)) { s1_nack := true.B }
pma_checker.io <> DontCare
pma_checker.io.req.bits.passthrough := true.B
pma_checker.io.req.bits.vaddr := s1_req.addr
pma_checker.io.req.bits.size := s1_req.size
pma_checker.io.req.bits.cmd := s1_req.cmd
pma_checker.io.req.bits.prv := s1_req.dprv
pma_checker.io.req.bits.v := s1_req.dv
val s1_paddr = Cat(Mux(s1_tlb_req_valid, s1_req.addr(paddrBits-1, pgIdxBits), tlb.io.resp.paddr >> pgIdxBits), s1_req.addr(pgIdxBits-1, 0))
val s1_victim_way = Wire(UInt())
val (s1_hit_way, s1_hit_state, s1_meta) =
if (usingDataScratchpad) {
val baseAddr = p(LookupByHartId)(_.dcache.flatMap(_.scratch.map(_.U)), io_hartid.get) | io_mmio_address_prefix.get
val inScratchpad = s1_paddr >= baseAddr && s1_paddr < baseAddr + (nSets * cacheBlockBytes).U
val hitState = Mux(inScratchpad, ClientMetadata.maximum, ClientMetadata.onReset)
val dummyMeta = L1Metadata(0.U, ClientMetadata.onReset)
(inScratchpad, hitState, Seq(tECC.encode(dummyMeta.asUInt)))
} else {
val metaReq = metaArb.io.out
val metaIdx = metaReq.bits.idx
when (metaReq.valid && metaReq.bits.write) {
val wmask = if (nWays == 1) Seq(true.B) else metaReq.bits.way_en.asBools
tag_array.write(metaIdx, VecInit(Seq.fill(nWays)(metaReq.bits.data)), wmask)
}
val s1_meta = tag_array.read(metaIdx, metaReq.valid && !metaReq.bits.write)
val s1_meta_uncorrected = s1_meta.map(tECC.decode(_).uncorrected.asTypeOf(new L1Metadata))
val s1_tag = s1_paddr >> tagLSB
val s1_meta_hit_way = s1_meta_uncorrected.map(r => r.coh.isValid() && r.tag === s1_tag).asUInt
val s1_meta_hit_state = (
s1_meta_uncorrected.map(r => Mux(r.tag === s1_tag && !s1_flush_valid, r.coh.asUInt, 0.U))
.reduce (_|_)).asTypeOf(chiselTypeOf(ClientMetadata.onReset))
(s1_meta_hit_way, s1_meta_hit_state, s1_meta)
}
val s1_data_way = WireDefault(if (nWays == 1) 1.U else Mux(inWriteback, releaseWay, s1_hit_way))
val tl_d_data_encoded = Wire(chiselTypeOf(encodeData(tl_out.d.bits.data, false.B)))
val s1_all_data_ways = VecInit(data.io.resp ++ (!cacheParams.separateUncachedResp).option(tl_d_data_encoded))
val s1_mask_xwr = new StoreGen(s1_req.size, s1_req.addr, 0.U, wordBytes).mask
val s1_mask = Mux(s1_req.cmd === M_PWR, io.cpu.s1_data.mask, s1_mask_xwr)
// for partial writes, s1_data.mask must be a subset of s1_mask_xwr
assert(!(s1_valid_masked && s1_req.cmd === M_PWR) || (s1_mask_xwr | ~io.cpu.s1_data.mask).andR)
val s2_valid = RegNext(s1_valid_masked && !s1_sfence, init=false.B)
val s2_valid_no_xcpt = s2_valid && !io.cpu.s2_xcpt.asUInt.orR
val s2_probe = RegNext(s1_probe, init=false.B)
val releaseInFlight = s1_probe || s2_probe || release_state =/= s_ready
val s2_not_nacked_in_s1 = RegNext(!s1_nack)
val s2_valid_not_nacked_in_s1 = s2_valid && s2_not_nacked_in_s1
val s2_valid_masked = s2_valid_no_xcpt && s2_not_nacked_in_s1
val s2_valid_not_killed = s2_valid_masked && !io.cpu.s2_kill
val s2_req = Reg(chiselTypeOf(io.cpu.req.bits))
val s2_cmd_flush_all = s2_req.cmd === M_FLUSH_ALL && !s2_req.size(0)
val s2_cmd_flush_line = s2_req.cmd === M_FLUSH_ALL && s2_req.size(0)
val s2_tlb_xcpt = Reg(chiselTypeOf(tlb.io.resp))
val s2_pma = Reg(chiselTypeOf(tlb.io.resp))
val s2_uncached_resp_addr = Reg(chiselTypeOf(s2_req.addr)) // should be DCE'd in synthesis
when (s1_valid_not_nacked || s1_flush_valid) {
s2_req := s1_req
s2_req.addr := s1_paddr
s2_tlb_xcpt := tlb.io.resp
s2_pma := Mux(s1_tlb_req_valid, pma_checker.io.resp, tlb.io.resp)
}
val s2_vaddr = Cat(RegEnable(s1_vaddr, s1_valid_not_nacked || s1_flush_valid) >> tagLSB, s2_req.addr(tagLSB-1, 0))
val s2_read = isRead(s2_req.cmd)
val s2_write = isWrite(s2_req.cmd)
val s2_readwrite = s2_read || s2_write
val s2_flush_valid_pre_tag_ecc = RegNext(s1_flush_valid)
val s1_meta_decoded = s1_meta.map(tECC.decode(_))
val s1_meta_clk_en = s1_valid_not_nacked || s1_flush_valid || s1_probe
val s2_meta_correctable_errors = s1_meta_decoded.map(m => RegEnable(m.correctable, s1_meta_clk_en)).asUInt
val s2_meta_uncorrectable_errors = s1_meta_decoded.map(m => RegEnable(m.uncorrectable, s1_meta_clk_en)).asUInt
val s2_meta_error_uncorrectable = s2_meta_uncorrectable_errors.orR
val s2_meta_corrected = s1_meta_decoded.map(m => RegEnable(m.corrected, s1_meta_clk_en).asTypeOf(new L1Metadata))
val s2_meta_error = (s2_meta_uncorrectable_errors | s2_meta_correctable_errors).orR
val s2_flush_valid = s2_flush_valid_pre_tag_ecc && !s2_meta_error
val s2_data = {
val wordsPerRow = rowBits / subWordBits
val en = s1_valid || inWriteback || io.cpu.replay_next
val word_en = Mux(inWriteback, Fill(wordsPerRow, 1.U), Mux(s1_did_read, s1_read_mask, 0.U))
val s1_way_words = s1_all_data_ways.map(_.grouped(dECC.width(eccBits) * (subWordBits / eccBits)))
if (cacheParams.pipelineWayMux) {
val s1_word_en = Mux(io.cpu.replay_next, 0.U, word_en)
(for (i <- 0 until wordsPerRow) yield {
val s2_way_en = RegEnable(Mux(s1_word_en(i), s1_data_way, 0.U), en)
val s2_way_words = (0 until nWays).map(j => RegEnable(s1_way_words(j)(i), en && word_en(i)))
(0 until nWays).map(j => Mux(s2_way_en(j), s2_way_words(j), 0.U)).reduce(_|_)
}).asUInt
} else {
val s1_word_en = Mux(!io.cpu.replay_next, word_en, UIntToOH(uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)), wordsPerRow))
(for (i <- 0 until wordsPerRow) yield {
RegEnable(Mux1H(Mux(s1_word_en(i), s1_data_way, 0.U), s1_way_words.map(_(i))), en)
}).asUInt
}
}
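// pipelineWayMux moves the way-select mux to the far side of the s1/s2 pipeline
// register (each way's word is registered separately and muxed in s2), trading extra
// flops for a shorter s1 critical path.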
val s2_probe_way = RegEnable(s1_hit_way, s1_probe)
val s2_probe_state = RegEnable(s1_hit_state, s1_probe)
val s2_hit_way = RegEnable(s1_hit_way, s1_valid_not_nacked)
val s2_hit_state = RegEnable(s1_hit_state, s1_valid_not_nacked || s1_flush_valid)
val s2_waw_hazard = RegEnable(s1_waw_hazard, s1_valid_not_nacked)
val s2_store_merge = Wire(Bool())
val s2_hit_valid = s2_hit_state.isValid()
val (s2_hit, s2_grow_param, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
val s2_data_decoded = decodeData(s2_data)
val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val s2_data_error = s2_data_decoded.map(_.error).orR
val s2_data_error_uncorrectable = s2_data_decoded.map(_.uncorrectable).orR
val s2_data_corrected = (s2_data_decoded.map(_.corrected): Seq[UInt]).asUInt
val s2_data_uncorrected = (s2_data_decoded.map(_.uncorrected): Seq[UInt]).asUInt
val s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked && !s2_meta_error && s2_hit
val s2_no_alloc_hazard = if (!usingVM || pgIdxBits >= untagBits) false.B else {
// make sure that any in-flight non-allocating accesses are ordered before
// any allocating accesses. this can only happen if aliasing is possible.
val any_no_alloc_in_flight = Reg(Bool())
when (!uncachedInFlight.asUInt.orR) { any_no_alloc_in_flight := false.B }
when (s2_valid && s2_req.no_alloc) { any_no_alloc_in_flight := true.B }
val s1_need_check = any_no_alloc_in_flight || s2_valid && s2_req.no_alloc
val concerns = (uncachedInFlight zip uncachedReqs) :+ (s2_valid && s2_req.no_alloc, s2_req)
val s1_uncached_hits = concerns.map { c =>
val concern_wmask = new StoreGen(c._2.size, c._2.addr, 0.U, wordBytes).mask
val addr_match = (c._2.addr ^ s1_paddr)(pgIdxBits+pgLevelBits-1, wordBytes.log2) === 0.U
val mask_match = (concern_wmask & s1_mask_xwr).orR || c._2.cmd === M_PWR || s1_req.cmd === M_PWR
val cmd_match = isWrite(c._2.cmd) || isWrite(s1_req.cmd)
c._1 && s1_need_check && cmd_match && addr_match && mask_match
}
val s2_uncached_hits = RegEnable(s1_uncached_hits.asUInt, s1_valid_not_nacked)
s2_uncached_hits.orR
}
val s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_readwrite && !s2_no_alloc_hazard
val s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_cmd_flush_line
val s2_valid_hit_pre_data_ecc = s2_valid_hit_pre_data_ecc_and_waw && (!s2_waw_hazard || s2_store_merge)
val s2_valid_data_error = s2_valid_hit_pre_data_ecc_and_waw && s2_data_error
val s2_valid_hit = s2_valid_hit_pre_data_ecc && !s2_data_error
val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_meta_error && !s2_hit
val s2_uncached = !s2_pma.cacheable || s2_req.no_alloc && !s2_pma.must_alloc && !s2_hit_valid
val s2_valid_cached_miss = s2_valid_miss && !s2_uncached && !uncachedInFlight.asUInt.orR
dontTouch(s2_valid_cached_miss)
val s2_want_victimize = (!usingDataScratchpad).B && (s2_valid_cached_miss || s2_valid_flush_line || s2_valid_data_error || s2_flush_valid)
val s2_cannot_victimize = !s2_flush_valid && io.cpu.s2_kill
val s2_victimize = s2_want_victimize && !s2_cannot_victimize
val s2_valid_uncached_pending = s2_valid_miss && s2_uncached && !uncachedInFlight.asUInt.andR
val s2_victim_way = UIntToOH(RegEnable(s1_victim_way, s1_valid_not_nacked || s1_flush_valid))
val s2_victim_or_hit_way = Mux(s2_hit_valid, s2_hit_way, s2_victim_way)
val s2_victim_tag = Mux(s2_valid_data_error || s2_valid_flush_line, s2_req.addr(paddrBits-1, tagLSB), Mux1H(s2_victim_way, s2_meta_corrected).tag)
val s2_victim_state = Mux(s2_hit_valid, s2_hit_state, Mux1H(s2_victim_way, s2_meta_corrected).coh)
val (s2_prb_ack_data, s2_report_param, probeNewCoh)= s2_probe_state.onProbe(probe_bits.param)
val (s2_victim_dirty, s2_shrink_param, voluntaryNewCoh) = s2_victim_state.onCacheControl(M_FLUSH)
dontTouch(s2_victim_dirty)
val s2_update_meta = s2_hit_state =/= s2_new_hit_state
val s2_dont_nack_uncached = s2_valid_uncached_pending && tl_out_a.ready
val s2_dont_nack_misc = s2_valid_masked && !s2_meta_error &&
(supports_flush.B && s2_cmd_flush_all && flushed && !flushing ||
supports_flush.B && s2_cmd_flush_line && !s2_hit ||
s2_req.cmd === M_WOK)
io.cpu.s2_nack := s2_valid_no_xcpt && !s2_dont_nack_uncached && !s2_dont_nack_misc && !s2_valid_hit
when (io.cpu.s2_nack || (s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta)) { s1_nack := true.B }
// tag updates on ECC errors
val s2_first_meta_corrected = PriorityMux(s2_meta_correctable_errors, s2_meta_corrected)
metaArb.io.in(1).valid := s2_meta_error && (s2_valid_masked || s2_flush_valid_pre_tag_ecc || s2_probe)
metaArb.io.in(1).bits.write := true.B
metaArb.io.in(1).bits.way_en := s2_meta_uncorrectable_errors | Mux(s2_meta_error_uncorrectable, 0.U, PriorityEncoderOH(s2_meta_correctable_errors))
metaArb.io.in(1).bits.idx := Mux(s2_probe, probeIdx(probe_bits), s2_vaddr(idxMSB, idxLSB))
metaArb.io.in(1).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(1).bits.idx << blockOffBits)
metaArb.io.in(1).bits.data := tECC.encode {
val new_meta = WireDefault(s2_first_meta_corrected)
when (s2_meta_error_uncorrectable) { new_meta.coh := ClientMetadata.onReset }
new_meta.asUInt
}
// tag updates on hit
metaArb.io.in(2).valid := s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta
metaArb.io.in(2).bits.write := !io.cpu.s2_kill
metaArb.io.in(2).bits.way_en := s2_victim_or_hit_way
metaArb.io.in(2).bits.idx := s2_vaddr(idxMSB, idxLSB)
metaArb.io.in(2).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0))
metaArb.io.in(2).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_new_hit_state).asUInt)
// load reservations and TL error reporting
val s2_lr = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XLR
val s2_sc = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XSC
val lrscCount = RegInit(0.U)
val lrscValid = lrscCount > lrscBackoff.U
val lrscBackingOff = lrscCount > 0.U && !lrscValid
val lrscAddr = Reg(UInt())
val lrscAddrMatch = lrscAddr === (s2_req.addr >> blockOffBits)
val s2_sc_fail = s2_sc && !(lrscValid && lrscAddrMatch)
when ((s2_valid_hit && s2_lr && !cached_grant_wait || s2_valid_cached_miss) && !io.cpu.s2_kill) {
lrscCount := Mux(s2_hit, (lrscCycles - 1).U, 0.U)
lrscAddr := s2_req.addr >> blockOffBits
}
when (lrscCount > 0.U) { lrscCount := lrscCount - 1.U }
when (s2_valid_not_killed && lrscValid) { lrscCount := lrscBackoff.U }
when (s1_probe) { lrscCount := 0.U }
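// LR arms a reservation for roughly lrscCycles; while lrscCount > lrscBackoff the
// reservation is live and incoming probes are deferred, after which the cache backs
// off and accepts probes, and an accepted probe clears the count so a racing SC
// fails instead of starving the probe.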
// don't perform data correction if it might clobber a recent store
val s2_correct = s2_data_error && !any_pstore_valid && !RegNext(any_pstore_valid || s2_valid) && usingDataScratchpad.B
// pending store buffer
val s2_valid_correct = s2_valid_hit_pre_data_ecc_and_waw && s2_correct && !io.cpu.s2_kill
def s2_store_valid_pre_kill = s2_valid_hit && s2_write && !s2_sc_fail
def s2_store_valid = s2_store_valid_pre_kill && !io.cpu.s2_kill
val pstore1_cmd = RegEnable(s1_req.cmd, s1_valid_not_nacked && s1_write)
val pstore1_addr = RegEnable(s1_vaddr, s1_valid_not_nacked && s1_write)
val pstore1_data = RegEnable(io.cpu.s1_data.data, s1_valid_not_nacked && s1_write)
val pstore1_way = RegEnable(s1_hit_way, s1_valid_not_nacked && s1_write)
val pstore1_mask = RegEnable(s1_mask, s1_valid_not_nacked && s1_write)
val pstore1_storegen_data = WireDefault(pstore1_data)
val pstore1_rmw = usingRMW.B && RegEnable(needsRead(s1_req), s1_valid_not_nacked && s1_write)
val pstore1_merge_likely = s2_valid_not_nacked_in_s1 && s2_write && s2_store_merge
val pstore1_merge = s2_store_valid && s2_store_merge
val pstore2_valid = RegInit(false.B)
val pstore_drain_opportunistic = !(io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)) && !(s1_valid && s1_waw_hazard)
val pstore_drain_on_miss = releaseInFlight || RegNext(io.cpu.s2_nack)
val pstore1_held = RegInit(false.B)
val pstore1_valid_likely = s2_valid && s2_write || pstore1_held
def pstore1_valid_not_rmw(s2_kill: Bool) = s2_valid_hit_pre_data_ecc && s2_write && !s2_kill || pstore1_held
val pstore1_valid = s2_store_valid || pstore1_held
any_pstore_valid := pstore1_held || pstore2_valid
val pstore_drain_structural = pstore1_valid_likely && pstore2_valid && ((s1_valid && s1_write) || pstore1_rmw)
assert(pstore1_rmw || pstore1_valid_not_rmw(io.cpu.s2_kill) === pstore1_valid)
ccover(pstore_drain_structural, "STORE_STRUCTURAL_HAZARD", "D$ read-modify-write structural hazard")
ccover(pstore1_valid && pstore_drain_on_miss, "STORE_DRAIN_ON_MISS", "D$ store buffer drain on miss")
ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard")
def should_pstore_drain(truly: Bool) = {
val s2_kill = truly && io.cpu.s2_kill
!pstore1_merge_likely &&
(usingRMW.B && pstore_drain_structural ||
(((pstore1_valid_not_rmw(s2_kill) && !pstore1_rmw) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss)))
}
val pstore_drain = should_pstore_drain(true.B)
pstore1_held := (s2_store_valid && !s2_store_merge || pstore1_held) && pstore2_valid && !pstore_drain
val advance_pstore1 = (pstore1_valid || s2_valid_correct) && (pstore2_valid === pstore_drain)
pstore2_valid := pstore2_valid && !pstore_drain || advance_pstore1
val pstore2_addr = RegEnable(Mux(s2_correct, s2_vaddr, pstore1_addr), advance_pstore1)
val pstore2_way = RegEnable(Mux(s2_correct, s2_hit_way, pstore1_way), advance_pstore1)
val pstore2_storegen_data = {
for (i <- 0 until wordBytes)
yield RegEnable(pstore1_storegen_data(8*(i+1)-1, 8*i), advance_pstore1 || pstore1_merge && pstore1_mask(i))
}.asUInt
val pstore2_storegen_mask = {
val mask = Reg(UInt(wordBytes.W))
when (advance_pstore1 || pstore1_merge) {
val mergedMask = pstore1_mask | Mux(pstore1_merge, mask, 0.U)
mask := ~Mux(s2_correct, 0.U, ~mergedMask)
}
mask
}
s2_store_merge := (if (eccBytes == 1) false.B else {
ccover(pstore1_merge, "STORE_MERGED", "D$ store merged")
// only merge stores to ECC granules that are already stored-to, to avoid
// WAW hazards
val wordMatch = (eccMask(pstore2_storegen_mask) | ~eccMask(pstore1_mask)).andR
val idxMatch = s2_vaddr(untagBits-1, log2Ceil(wordBytes)) === pstore2_addr(untagBits-1, log2Ceil(wordBytes))
val tagMatch = (s2_hit_way & pstore2_way).orR
pstore2_valid && wordMatch && idxMatch && tagMatch
})
dataArb.io.in(0).valid := should_pstore_drain(false.B)
dataArb.io.in(0).bits.write := pstore_drain
dataArb.io.in(0).bits.addr := Mux(pstore2_valid, pstore2_addr, pstore1_addr)
dataArb.io.in(0).bits.way_en := Mux(pstore2_valid, pstore2_way, pstore1_way)
dataArb.io.in(0).bits.wdata := encodeData(Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_data)), false.B)
dataArb.io.in(0).bits.wordMask := {
val eccMask = dataArb.io.in(0).bits.eccMask.asBools.grouped(subWordBytes/eccBytes).map(_.orR).toSeq.asUInt
val wordMask = UIntToOH(Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1, wordBytes.log2))
FillInterleaved(wordBytes/subWordBytes, wordMask) & Fill(rowBytes/wordBytes, eccMask)
}
dataArb.io.in(0).bits.eccMask := eccMask(Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask))
// store->load RAW hazard detection
def s1Depends(addr: UInt, mask: UInt) =
addr(idxMSB, wordOffBits) === s1_vaddr(idxMSB, wordOffBits) &&
Mux(s1_write, (eccByteMask(mask) & eccByteMask(s1_mask_xwr)).orR, (mask & s1_mask_xwr).orR)
val s1_hazard =
(pstore1_valid_likely && s1Depends(pstore1_addr, pstore1_mask)) ||
(pstore2_valid && s1Depends(pstore2_addr, pstore2_storegen_mask))
val s1_raw_hazard = s1_read && s1_hazard
s1_waw_hazard := (if (eccBytes == 1) false.B else {
ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard")
s1_write && (s1_hazard || needsRead(s1_req) && !s1_did_read)
})
when (s1_valid && s1_raw_hazard) { s1_nack := true.B }
// performance hints to processor
io.cpu.s2_nack_cause_raw := RegNext(s1_raw_hazard) || !(!s2_waw_hazard || s2_store_merge)
// Prepare a TileLink request message that initiates a transaction
val a_source = PriorityEncoder(~uncachedInFlight.asUInt << mmioOffset) // skip the MSHR
val acquire_address = (s2_req.addr >> idxLSB) << idxLSB
val access_address = s2_req.addr
val a_size = s2_req.size
val a_data = Fill(beatWords, pstore1_data)
val a_mask = pstore1_mask << (access_address.extract(beatBytes.log2-1, wordBytes.log2) << 3)
val get = edge.Get(a_source, access_address, a_size)._2
val put = edge.Put(a_source, access_address, a_size, a_data)._2
val putpartial = edge.Put(a_source, access_address, a_size, a_data, a_mask)._2
val atomics = if (edge.manager.anySupportLogical) {
MuxLookup(s2_req.cmd, WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))))(Array(
M_XA_SWAP -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.SWAP)._2,
M_XA_XOR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.XOR) ._2,
M_XA_OR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.OR) ._2,
M_XA_AND -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.AND) ._2,
M_XA_ADD -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.ADD)._2,
M_XA_MIN -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MIN)._2,
M_XA_MAX -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAX)._2,
M_XA_MINU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MINU)._2,
M_XA_MAXU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, assert fail if processor asks for them
assert (!(tl_out_a.valid && s2_read && s2_write && s2_uncached))
WireDefault(new TLBundleA(edge.bundle), DontCare)
}
tl_out_a.valid := !io.cpu.s2_kill &&
(s2_valid_uncached_pending ||
(s2_valid_cached_miss &&
!(release_ack_wait && (s2_req.addr ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U) &&
(cacheParams.acquireBeforeRelease.B && !release_ack_wait && release_queue_empty || !s2_victim_dirty)))
tl_out_a.bits := Mux(!s2_uncached, acquire(s2_vaddr, s2_req.addr, s2_grow_param),
Mux(!s2_write, get,
Mux(s2_req.cmd === M_PWR, putpartial,
Mux(!s2_read, put, atomics))))
// Drive APROT Bits
tl_out_a.bits.user.lift(AMBAProt).foreach { x =>
val user_bit_cacheable = s2_pma.cacheable
x.privileged := s2_req.dprv === PRV.M.U || user_bit_cacheable
// if the address is cacheable, enable outer caches
x.bufferable := user_bit_cacheable
x.modifiable := user_bit_cacheable
x.readalloc := user_bit_cacheable
x.writealloc := user_bit_cacheable
// Following are always tied off
x.fetch := false.B
x.secure := true.B
}
// Set pending bits for outstanding TileLink transaction
val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset
when (tl_out_a.fire) {
when (s2_uncached) {
(a_sel.asBools zip (uncachedInFlight zip uncachedReqs)) foreach { case (s, (f, r)) =>
when (s) {
f := true.B
r := s2_req
r.cmd := Mux(s2_write, Mux(s2_req.cmd === M_PWR, M_PWR, M_XWR), M_XRD)
}
}
}.otherwise {
cached_grant_wait := true.B
refill_way := s2_victim_or_hit_way
}
}
// grant
val (d_first, d_last, d_done, d_address_inc) = edge.addr_inc(tl_out.d)
val (d_opc, grantIsUncached, grantIsUncachedData) = {
val uncachedGrantOpcodesSansData = Seq(AccessAck, HintAck)
val uncachedGrantOpcodesWithData = Seq(AccessAckData)
val uncachedGrantOpcodes = uncachedGrantOpcodesWithData ++ uncachedGrantOpcodesSansData
val whole_opc = tl_out.d.bits.opcode
if (usingDataScratchpad) {
assert(!tl_out.d.valid || whole_opc.isOneOf(uncachedGrantOpcodes))
// the only valid TL-D messages are uncached, so we can do some pruning
val opc = whole_opc(uncachedGrantOpcodes.map(_.getWidth).max - 1, 0)
val data = DecodeLogic(opc, uncachedGrantOpcodesWithData, uncachedGrantOpcodesSansData)
(opc, true.B, data)
} else {
(whole_opc, whole_opc.isOneOf(uncachedGrantOpcodes), whole_opc.isOneOf(uncachedGrantOpcodesWithData))
}
}
tl_d_data_encoded := encodeData(tl_out.d.bits.data, tl_out.d.bits.corrupt && !io.ptw.customCSRs.suppressCorruptOnGrantData && !grantIsUncached)
val grantIsCached = d_opc.isOneOf(Grant, GrantData)
val grantIsVoluntary = d_opc === ReleaseAck // Clears a different pending bit
val grantIsRefill = d_opc === GrantData // Writes the data array
val grantInProgress = RegInit(false.B)
val blockProbeAfterGrantCount = RegInit(0.U)
when (blockProbeAfterGrantCount > 0.U) { blockProbeAfterGrantCount := blockProbeAfterGrantCount - 1.U }
val canAcceptCachedGrant = !release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)
tl_out.d.ready := Mux(grantIsCached, (!d_first || tl_out.e.ready) && canAcceptCachedGrant, true.B)
val uncachedRespIdxOH = UIntToOH(tl_out.d.bits.source, maxUncachedInFlight+mmioOffset) >> mmioOffset
uncachedResp := Mux1H(uncachedRespIdxOH, uncachedReqs)
when (tl_out.d.fire) {
when (grantIsCached) {
grantInProgress := true.B
assert(cached_grant_wait, "A GrantData was unexpected by the dcache.")
when(d_last) {
cached_grant_wait := false.B
grantInProgress := false.B
blockProbeAfterGrantCount := (blockProbeAfterGrantCycles - 1).U
replacer.miss
}
} .elsewhen (grantIsUncached) {
(uncachedRespIdxOH.asBools zip uncachedInFlight) foreach { case (s, f) =>
when (s && d_last) {
assert(f, "An AccessAck was unexpected by the dcache.") // TODO must handle Ack coming back on same cycle!
f := false.B
}
}
when (grantIsUncachedData) {
if (!cacheParams.separateUncachedResp) {
if (!cacheParams.pipelineWayMux)
s1_data_way := 1.U << nWays
s2_req.cmd := M_XRD
s2_req.size := uncachedResp.size
s2_req.signed := uncachedResp.signed
s2_req.tag := uncachedResp.tag
s2_req.addr := {
require(rowOffBits >= beatOffBits)
val dontCareBits = s1_paddr >> rowOffBits << rowOffBits
dontCareBits | uncachedResp.addr(beatOffBits-1, 0)
}
s2_uncached_resp_addr := uncachedResp.addr
}
}
} .elsewhen (grantIsVoluntary) {
assert(release_ack_wait, "A ReleaseAck was unexpected by the dcache.") // TODO should handle Ack coming back on same cycle!
release_ack_wait := false.B
}
}
// Finish TileLink transaction by issuing a GrantAck
tl_out.e.valid := tl_out.d.valid && d_first && grantIsCached && canAcceptCachedGrant
tl_out.e.bits := edge.GrantAck(tl_out.d.bits)
assert(tl_out.e.fire === (tl_out.d.fire && d_first && grantIsCached))
// data refill
// note this ready-valid signaling ignores E-channel backpressure, which
// benignly means the data RAM might occasionally be redundantly written
dataArb.io.in(1).valid := tl_out.d.valid && grantIsRefill && canAcceptCachedGrant
when (grantIsRefill && !dataArb.io.in(1).ready) {
tl_out.e.valid := false.B
tl_out.d.ready := false.B
}
if (!usingDataScratchpad) {
dataArb.io.in(1).bits.write := true.B
dataArb.io.in(1).bits.addr := (s2_vaddr >> idxLSB) << idxLSB | d_address_inc
dataArb.io.in(1).bits.way_en := refill_way
dataArb.io.in(1).bits.wdata := tl_d_data_encoded
dataArb.io.in(1).bits.wordMask := ~0.U((rowBytes / subWordBytes).W)
dataArb.io.in(1).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
} else {
dataArb.io.in(1).bits := dataArb.io.in(0).bits
}
// tag updates on refill
// ignore backpressure from metaArb, which can only be caused by tag ECC
// errors on hit-under-miss. failing to write the new tag will leave the
// line invalid, so we'll simply request the line again later.
metaArb.io.in(3).valid := grantIsCached && d_done && !tl_out.d.bits.denied
metaArb.io.in(3).bits.write := true.B
metaArb.io.in(3).bits.way_en := refill_way
metaArb.io.in(3).bits.idx := s2_vaddr(idxMSB, idxLSB)
metaArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0))
metaArb.io.in(3).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_hit_state.onGrant(s2_req.cmd, tl_out.d.bits.param)).asUInt)
if (!cacheParams.separateUncachedResp) {
// don't accept uncached grants if there's a structural hazard on s2_data...
val blockUncachedGrant = Reg(Bool())
blockUncachedGrant := dataArb.io.out.valid
when (grantIsUncachedData && (blockUncachedGrant || s1_valid)) {
tl_out.d.ready := false.B
// ...but insert bubble to guarantee grant's eventual forward progress
when (tl_out.d.valid) {
io.cpu.req.ready := false.B
dataArb.io.in(1).valid := true.B
dataArb.io.in(1).bits.write := false.B
blockUncachedGrant := !dataArb.io.in(1).ready
}
}
}
ccover(tl_out.d.valid && !tl_out.d.ready, "BLOCK_D", "D$ D-channel blocked")
// Handle an incoming TileLink Probe message
val block_probe_for_core_progress = blockProbeAfterGrantCount > 0.U || lrscValid
val block_probe_for_pending_release_ack = release_ack_wait && (tl_out.b.bits.address ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U
val block_probe_for_ordering = releaseInFlight || block_probe_for_pending_release_ack || grantInProgress
metaArb.io.in(6).valid := tl_out.b.valid && (!block_probe_for_core_progress || lrscBackingOff)
tl_out.b.ready := metaArb.io.in(6).ready && !(block_probe_for_core_progress || block_probe_for_ordering || s1_valid || s2_valid)
metaArb.io.in(6).bits.write := false.B
metaArb.io.in(6).bits.idx := probeIdx(tl_out.b.bits)
metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, tl_out.b.bits.address)
metaArb.io.in(6).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(6).bits.data := metaArb.io.in(4).bits.data
// replacement policy
s1_victim_way := (if (replacer.perSet && nWays > 1) {
val repl_array = Mem(nSets, UInt(replacer.nBits.W))
val s1_repl_idx = s1_req.addr(idxBits+blockOffBits-1, blockOffBits)
val s2_repl_idx = s2_vaddr(idxBits+blockOffBits-1, blockOffBits)
val s2_repl_state = Reg(UInt(replacer.nBits.W))
val s2_new_repl_state = replacer.get_next_state(s2_repl_state, OHToUInt(s2_hit_way))
val s2_repl_wen = s2_valid_masked && s2_hit_way.orR && s2_repl_state =/= s2_new_repl_state
val s1_repl_state = Mux(s2_repl_wen && s2_repl_idx === s1_repl_idx, s2_new_repl_state, repl_array(s1_repl_idx))
when (s1_valid_not_nacked) { s2_repl_state := s1_repl_state }
val waddr = Mux(resetting, flushCounter(idxBits-1, 0), s2_repl_idx)
val wdata = Mux(resetting, 0.U, s2_new_repl_state)
val wen = resetting || s2_repl_wen
when (wen) { repl_array(waddr) := wdata }
replacer.get_replace_way(s1_repl_state)
} else {
replacer.way
})
// release
val (c_first, c_last, releaseDone, c_count) = edge.count(tl_out_c)
val releaseRejected = Wire(Bool())
val s1_release_data_valid = RegNext(dataArb.io.in(2).fire)
val s2_release_data_valid = RegNext(s1_release_data_valid && !releaseRejected)
releaseRejected := s2_release_data_valid && !tl_out_c.fire
val releaseDataBeat = Cat(0.U, c_count) + Mux(releaseRejected, 0.U, s1_release_data_valid + Cat(0.U, s2_release_data_valid))
val nackResponseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = TLPermissions.NtoN)
val cleanReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param)
val dirtyReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param, data = 0.U)
tl_out_c.valid := (s2_release_data_valid || (!cacheParams.silentDrop.B && release_state === s_voluntary_release)) && !(c_first && release_ack_wait)
tl_out_c.bits := nackResponseMessage
val newCoh = WireDefault(probeNewCoh)
releaseWay := s2_probe_way
if (!usingDataScratchpad) {
when (s2_victimize) {
assert(s2_valid_flush_line || s2_flush_valid || io.cpu.s2_nack)
val discard_line = s2_valid_flush_line && s2_req.size(1) || s2_flush_valid && flushing_req.size(1)
release_state := Mux(s2_victim_dirty && !discard_line, s_voluntary_writeback,
Mux(!cacheParams.silentDrop.B && !release_ack_wait && release_queue_empty && s2_victim_state.isValid() && (s2_valid_flush_line || s2_flush_valid || s2_readwrite && !s2_hit_valid), s_voluntary_release,
s_voluntary_write_meta))
probe_bits := addressToProbe(s2_vaddr, Cat(s2_victim_tag, s2_req.addr(tagLSB-1, idxLSB)) << idxLSB)
}
when (s2_probe) {
val probeNack = WireDefault(true.B)
when (s2_meta_error) {
release_state := s_probe_retry
}.elsewhen (s2_prb_ack_data) {
release_state := s_probe_rep_dirty
}.elsewhen (s2_probe_state.isValid()) {
tl_out_c.valid := true.B
tl_out_c.bits := cleanReleaseMessage
release_state := Mux(releaseDone, s_probe_write_meta, s_probe_rep_clean)
}.otherwise {
tl_out_c.valid := true.B
probeNack := !releaseDone
release_state := Mux(releaseDone, s_ready, s_probe_rep_miss)
}
when (probeNack) { s1_nack := true.B }
}
when (release_state === s_probe_retry) {
metaArb.io.in(6).valid := true.B
metaArb.io.in(6).bits.idx := probeIdx(probe_bits)
metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, probe_bits.address)
when (metaArb.io.in(6).ready) {
release_state := s_ready
s1_probe := true.B
}
}
when (release_state === s_probe_rep_miss) {
tl_out_c.valid := true.B
when (releaseDone) { release_state := s_ready }
}
when (release_state === s_probe_rep_clean) {
tl_out_c.valid := true.B
tl_out_c.bits := cleanReleaseMessage
when (releaseDone) { release_state := s_probe_write_meta }
}
when (release_state === s_probe_rep_dirty) {
tl_out_c.bits := dirtyReleaseMessage
when (releaseDone) { release_state := s_probe_write_meta }
}
when (release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)) {
when (release_state === s_voluntary_release) {
tl_out_c.bits := edge.Release(fromSource = 0.U,
toAddress = 0.U,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = s2_shrink_param)._2
}.otherwise {
tl_out_c.bits := edge.Release(fromSource = 0.U,
toAddress = 0.U,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = s2_shrink_param,
data = 0.U)._2
}
newCoh := voluntaryNewCoh
releaseWay := s2_victim_or_hit_way
when (releaseDone) { release_state := s_voluntary_write_meta }
when (tl_out_c.fire && c_first) {
release_ack_wait := true.B
release_ack_addr := probe_bits.address
}
}
tl_out_c.bits.source := probe_bits.source
tl_out_c.bits.address := probe_bits.address
tl_out_c.bits.data := s2_data_corrected
tl_out_c.bits.corrupt := inWriteback && s2_data_error_uncorrectable
}
tl_out_c.bits.user.lift(AMBAProt).foreach { x =>
x.fetch := false.B
x.secure := true.B
x.privileged := true.B
x.bufferable := true.B
x.modifiable := true.B
x.readalloc := true.B
x.writealloc := true.B
}
dataArb.io.in(2).valid := inWriteback && releaseDataBeat < refillCycles.U
dataArb.io.in(2).bits := dataArb.io.in(1).bits
dataArb.io.in(2).bits.write := false.B
dataArb.io.in(2).bits.addr := (probeIdx(probe_bits) << blockOffBits) | (releaseDataBeat(log2Up(refillCycles)-1,0) << rowOffBits)
dataArb.io.in(2).bits.wordMask := ~0.U((rowBytes / subWordBytes).W)
dataArb.io.in(2).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
dataArb.io.in(2).bits.way_en := ~0.U(nWays.W)
metaArb.io.in(4).valid := release_state.isOneOf(s_voluntary_write_meta, s_probe_write_meta)
metaArb.io.in(4).bits.write := true.B
metaArb.io.in(4).bits.way_en := releaseWay
metaArb.io.in(4).bits.idx := probeIdx(probe_bits)
metaArb.io.in(4).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, probe_bits.address(idxMSB, 0))
metaArb.io.in(4).bits.data := tECC.encode(L1Metadata(tl_out_c.bits.address >> tagLSB, newCoh).asUInt)
when (metaArb.io.in(4).fire) { release_state := s_ready }
// cached response
(io.cpu.resp.bits: Data).waiveAll :<>= (s2_req: Data).waiveAll
io.cpu.resp.bits.has_data := s2_read
io.cpu.resp.bits.replay := false.B
io.cpu.s2_uncached := s2_uncached && !s2_hit
io.cpu.s2_paddr := s2_req.addr
io.cpu.s2_gpa := s2_tlb_xcpt.gpa
io.cpu.s2_gpa_is_pte := s2_tlb_xcpt.gpa_is_pte
// report whether there are any outstanding accesses. disregard any
// slave-port accesses, since they don't affect local memory ordering.
val s1_isSlavePortAccess = s1_req.no_xcpt
val s2_isSlavePortAccess = s2_req.no_xcpt
io.cpu.ordered := !(s1_valid && !s1_isSlavePortAccess || s2_valid && !s2_isSlavePortAccess || cached_grant_wait || uncachedInFlight.asUInt.orR)
io.cpu.store_pending := (cached_grant_wait && isWrite(s2_req.cmd)) || uncachedInFlight.asUInt.orR
val s1_xcpt_valid = tlb.io.req.valid && !s1_isSlavePortAccess && !s1_nack
io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), s2_tlb_xcpt, 0.U.asTypeOf(s2_tlb_xcpt))
if (usingDataScratchpad) {
assert(!(s2_valid_masked && s2_req.cmd.isOneOf(M_XLR, M_XSC)))
} else {
ccover(tl_out.b.valid && !tl_out.b.ready, "BLOCK_B", "D$ B-channel blocked")
}
// uncached response
val s1_uncached_data_word = {
val word_idx = uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val words = tl_out.d.bits.data.grouped(wordBits)
words(word_idx)
}
val s2_uncached_data_word = RegEnable(s1_uncached_data_word, io.cpu.replay_next)
val doUncachedResp = RegNext(io.cpu.replay_next)
io.cpu.resp.valid := (s2_valid_hit_pre_data_ecc || doUncachedResp) && !s2_data_error
io.cpu.replay_next := tl_out.d.fire && grantIsUncachedData && !cacheParams.separateUncachedResp.B
when (doUncachedResp) {
assert(!s2_valid_hit)
io.cpu.resp.bits.replay := true.B
io.cpu.resp.bits.addr := s2_uncached_resp_addr
}
io.cpu.uncached_resp.map { resp =>
resp.valid := tl_out.d.valid && grantIsUncachedData
resp.bits.tag := uncachedResp.tag
resp.bits.size := uncachedResp.size
resp.bits.signed := uncachedResp.signed
resp.bits.data := new LoadGen(uncachedResp.size, uncachedResp.signed, uncachedResp.addr, s1_uncached_data_word, false.B, wordBytes).data
resp.bits.data_raw := s1_uncached_data_word
when (grantIsUncachedData && !resp.ready) {
tl_out.d.ready := false.B
}
}
// load data subword mux/sign extension
val s2_data_word = (0 until rowBits by wordBits).map(i => s2_data_uncorrected(wordBits+i-1,i)).reduce(_|_)
val s2_data_word_corrected = (0 until rowBits by wordBits).map(i => s2_data_corrected(wordBits+i-1,i)).reduce(_|_)
val s2_data_word_possibly_uncached = Mux(cacheParams.pipelineWayMux.B && doUncachedResp, s2_uncached_data_word, 0.U) | s2_data_word
val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word_possibly_uncached, s2_sc, wordBytes)
io.cpu.resp.bits.data := loadgen.data | s2_sc_fail
io.cpu.resp.bits.data_word_bypass := loadgen.wordData
io.cpu.resp.bits.data_raw := s2_data_word
io.cpu.resp.bits.store_data := pstore1_data
// AMOs
if (usingRMW) {
val amoalus = (0 until coreDataBits / xLen).map { i =>
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := pstore1_mask >> (i * xBytes)
amoalu.io.cmd := (if (usingAtomicsInCache) pstore1_cmd else M_XWR)
amoalu.io.lhs := s2_data_word >> (i * xLen)
amoalu.io.rhs := pstore1_data >> (i * xLen)
amoalu
}
pstore1_storegen_data := (if (!usingDataScratchpad) amoalus.map(_.io.out).asUInt else {
val mask = FillInterleaved(8, Mux(s2_correct, 0.U, pstore1_mask))
amoalus.map(_.io.out_unmasked).asUInt & mask | s2_data_word_corrected & ~mask
})
} else if (!usingAtomics) {
assert(!(s1_valid_masked && s1_read && s1_write), "unsupported D$ operation")
}
if (coreParams.useVector) {
edge.manager.managers.foreach { m =>
// Statically ensure that no-allocate accesses are permitted.
// We could consider turning some of these into dynamic PMA checks.
require(!m.supportsAcquireB || m.supportsGet, "With a vector unit, cacheable memory must support Get")
require(!m.supportsAcquireT || m.supportsPutPartial, "With a vector unit, cacheable memory must support PutPartial")
}
}
// flushes
if (!usingDataScratchpad)
when (RegNext(reset.asBool)) { resetting := true.B }
val flushCounterNext = flushCounter +& 1.U
val flushDone = (flushCounterNext >> log2Ceil(nSets)) === nWays.U
val flushCounterWrap = flushCounterNext(log2Ceil(nSets)-1, 0)
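// flushCounter packs {way, set}: the low idxBits select the set currently being
// flushed (or initialized on reset) and the upper bits select the way; flushDone
// fires once the way index reaches nWays.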
ccover(s2_valid_masked && s2_cmd_flush_all && s2_meta_error, "TAG_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in tag array during cache flush")
ccover(s2_valid_masked && s2_cmd_flush_all && s2_data_error, "DATA_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in data array during cache flush")
s1_flush_valid := metaArb.io.in(5).fire && !s1_flush_valid && !s2_flush_valid_pre_tag_ecc && release_state === s_ready && !release_ack_wait
metaArb.io.in(5).valid := flushing && !flushed
metaArb.io.in(5).bits.write := false.B
metaArb.io.in(5).bits.idx := flushCounter(idxBits-1, 0)
metaArb.io.in(5).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(5).bits.idx << blockOffBits)
metaArb.io.in(5).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(5).bits.data := metaArb.io.in(4).bits.data
// Only flush D$ on FENCE.I if some cached executable regions are untracked.
if (supports_flush) {
when (s2_valid_masked && s2_cmd_flush_all) {
when (!flushed && !io.cpu.s2_kill && !release_ack_wait && !uncachedInFlight.asUInt.orR) {
flushing := true.B
flushing_req := s2_req
}
}
when (tl_out_a.fire && !s2_uncached) { flushed := false.B }
when (flushing) {
s1_victim_way := flushCounter >> log2Up(nSets)
when (s2_flush_valid) {
flushCounter := flushCounterNext
when (flushDone) {
flushed := true.B
if (!isPow2(nWays)) flushCounter := flushCounterWrap
}
}
when (flushed && release_state === s_ready && !release_ack_wait) {
flushing := false.B
}
}
}
metaArb.io.in(0).valid := resetting
metaArb.io.in(0).bits := metaArb.io.in(5).bits
metaArb.io.in(0).bits.write := true.B
metaArb.io.in(0).bits.way_en := ~0.U(nWays.W)
metaArb.io.in(0).bits.data := tECC.encode(L1Metadata(0.U, ClientMetadata.onReset).asUInt)
when (resetting) {
flushCounter := flushCounterNext
when (flushDone) {
resetting := false.B
if (!isPow2(nWays)) flushCounter := flushCounterWrap
}
}
// gate the clock
clock_en_reg := !cacheParams.clockGate.B ||
io.ptw.customCSRs.disableDCacheClockGate ||
io.cpu.keep_clock_enabled ||
metaArb.io.out.valid || // subsumes resetting || flushing
s1_probe || s2_probe ||
s1_valid || s2_valid ||
io.tlb_port.req.valid ||
s1_tlb_req_valid || s2_tlb_req_valid ||
pstore1_held || pstore2_valid ||
release_state =/= s_ready ||
release_ack_wait || !release_queue_empty ||
!tlb.io.req.ready ||
cached_grant_wait || uncachedInFlight.asUInt.orR ||
lrscCount > 0.U || blockProbeAfterGrantCount > 0.U
// performance events
io.cpu.perf.acquire := edge.done(tl_out_a)
io.cpu.perf.release := edge.done(tl_out_c)
io.cpu.perf.grant := tl_out.d.valid && d_last
io.cpu.perf.tlbMiss := io.ptw.req.fire
io.cpu.perf.storeBufferEmptyAfterLoad := !(
(s1_valid && s1_write) ||
((s2_valid && s2_write && !s2_waw_hazard) || pstore1_held) ||
pstore2_valid)
io.cpu.perf.storeBufferEmptyAfterStore := !(
(s1_valid && s1_write) ||
(s2_valid && s2_write && pstore1_rmw) ||
((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) && pstore2_valid))
io.cpu.perf.canAcceptStoreThenLoad := !(
((s2_valid && s2_write && pstore1_rmw) && (s1_valid && s1_write && !s1_waw_hazard)) ||
(pstore2_valid && pstore1_valid_likely && (s1_valid && s1_write)))
io.cpu.perf.canAcceptStoreThenRMW := io.cpu.perf.canAcceptStoreThenLoad && !pstore2_valid
io.cpu.perf.canAcceptLoadThenLoad := !((s1_valid && s1_write && needsRead(s1_req)) && ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) || pstore2_valid))
io.cpu.perf.blocked := {
// stop reporting blocked just before unblocking to avoid overly conservative stalling
val beatsBeforeEnd = outer.crossing match {
case SynchronousCrossing(_) => 2
case RationalCrossing(_) => 1 // assumes 1 < ratio <= 2; need more bookkeeping for optimal handling of >2
case _: AsynchronousCrossing => 1 // likewise
case _: CreditedCrossing => 1 // likewise
}
val near_end_of_refill = if (cacheBlockBytes / beatBytes <= beatsBeforeEnd) tl_out.d.valid else {
val refill_count = RegInit(0.U((cacheBlockBytes / beatBytes).log2.W))
when (tl_out.d.fire && grantIsRefill) { refill_count := refill_count + 1.U }
refill_count >= (cacheBlockBytes / beatBytes - beatsBeforeEnd).U
}
cached_grant_wait && !near_end_of_refill
}
// report errors
val (data_error, data_error_uncorrectable, data_error_addr) =
if (usingDataScratchpad) (s2_valid_data_error, s2_data_error_uncorrectable, s2_req.addr) else {
(RegNext(tl_out_c.fire && inWriteback && s2_data_error),
RegNext(s2_data_error_uncorrectable),
probe_bits.address) // This is stable for a cycle after tl_out_c.fire, so don't need a register
}
{
val error_addr =
Mux(metaArb.io.in(1).valid, Cat(s2_first_meta_corrected.tag, metaArb.io.in(1).bits.addr(tagLSB-1, idxLSB)),
data_error_addr >> idxLSB) << idxLSB
io.errors.uncorrectable.foreach { u =>
u.valid := metaArb.io.in(1).valid && s2_meta_error_uncorrectable || data_error && data_error_uncorrectable
u.bits := error_addr
}
io.errors.correctable.foreach { c =>
c.valid := metaArb.io.in(1).valid || data_error
c.bits := error_addr
io.errors.uncorrectable.foreach { u => when (u.valid) { c.valid := false.B } }
}
io.errors.bus.valid := tl_out.d.fire && (tl_out.d.bits.denied || tl_out.d.bits.corrupt)
io.errors.bus.bits := Mux(grantIsCached, s2_req.addr >> idxLSB << idxLSB, 0.U)
ccoverNotScratchpad(io.errors.bus.valid && grantIsCached, "D_ERROR_CACHED", "D$ D-channel error, cached")
ccover(io.errors.bus.valid && !grantIsCached, "D_ERROR_UNCACHED", "D$ D-channel error, uncached")
}
if (usingDataScratchpad) {
val data_error_cover = Seq(
property.CoverBoolean(!data_error, Seq("no_data_error")),
property.CoverBoolean(data_error && !data_error_uncorrectable, Seq("data_correctable_error")),
property.CoverBoolean(data_error && data_error_uncorrectable, Seq("data_uncorrectable_error")))
val request_source = Seq(
property.CoverBoolean(s2_isSlavePortAccess, Seq("from_TL")),
property.CoverBoolean(!s2_isSlavePortAccess, Seq("from_CPU")))
property.cover(new property.CrossProperty(
Seq(data_error_cover, request_source),
Seq(),
"MemorySystem;;Scratchpad Memory Bit Flip Cross Covers"))
} else {
val data_error_type = Seq(
property.CoverBoolean(!s2_valid_data_error, Seq("no_data_error")),
property.CoverBoolean(s2_valid_data_error && !s2_data_error_uncorrectable, Seq("data_correctable_error")),
property.CoverBoolean(s2_valid_data_error && s2_data_error_uncorrectable, Seq("data_uncorrectable_error")))
val data_error_dirty = Seq(
property.CoverBoolean(!s2_victim_dirty, Seq("data_clean")),
property.CoverBoolean(s2_victim_dirty, Seq("data_dirty")))
val request_source = if (supports_flush) {
Seq(
property.CoverBoolean(!flushing, Seq("access")),
property.CoverBoolean(flushing, Seq("during_flush")))
} else {
Seq(property.CoverBoolean(true.B, Seq("never_flush")))
}
val tag_error_cover = Seq(
property.CoverBoolean( !s2_meta_error, Seq("no_tag_error")),
property.CoverBoolean( s2_meta_error && !s2_meta_error_uncorrectable, Seq("tag_correctable_error")),
property.CoverBoolean( s2_meta_error && s2_meta_error_uncorrectable, Seq("tag_uncorrectable_error")))
property.cover(new property.CrossProperty(
Seq(data_error_type, data_error_dirty, request_source, tag_error_cover),
Seq(),
"MemorySystem;;Cache Memory Bit Flip Cross Covers"))
}
} // leaving gated-clock domain
val dcacheImpl = withClock (gated_clock) { new DCacheModuleImpl }
def encodeData(x: UInt, poison: Bool) = x.grouped(eccBits).map(dECC.encode(_, if (dECC.canDetect) poison else false.B)).asUInt
def dummyEncodeData(x: UInt) = x.grouped(eccBits).map(dECC.swizzle(_)).asUInt
def decodeData(x: UInt) = x.grouped(dECC.width(eccBits)).map(dECC.decode(_))
def eccMask(byteMask: UInt) = byteMask.grouped(eccBytes).map(_.orR).asUInt
def eccByteMask(byteMask: UInt) = FillInterleaved(eccBytes, eccMask(byteMask))
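// The ECC helpers operate on eccBits-sized granules: encodeData/decodeData run each
// granule through dECC, while eccMask/eccByteMask coarsen a byte mask to granule
// granularity. For example, with eccBytes = 1 and an identity code they all reduce
// to pass-throughs of the data and mask.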
def likelyNeedsRead(req: HellaCacheReq) = {
val res = !req.cmd.isOneOf(M_XWR, M_PFW) || req.size < log2Ceil(eccBytes).U
assert(!needsRead(req) || res)
res
}
def needsRead(req: HellaCacheReq) =
isRead(req.cmd) ||
(isWrite(req.cmd) && (req.cmd === M_PWR || req.size < log2Ceil(eccBytes).U))
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"DCACHE_$label", "MemorySystem;;" + desc)
def ccoverNotScratchpad(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
if (!usingDataScratchpad) ccover(cond, label, desc)
require(!usingVM || tagLSB <= pgIdxBits, s"D$$ set size must not exceed ${1<<(pgIdxBits-10)} KiB; got ${(nSets * cacheBlockBytes)>>10} KiB")
def tagLSB: Int = untagBits
def probeIdx(b: TLBundleB): UInt = b.address(idxMSB, idxLSB)
def addressToProbe(vaddr: UInt, paddr: UInt): TLBundleB = {
val res = Wire(new TLBundleB(edge.bundle))
res :#= DontCare
res.address := paddr
res.source := (mmioOffset - 1).U
res
}
def acquire(vaddr: UInt, paddr: UInt, param: UInt): TLBundleA = {
if (!edge.manager.anySupportAcquireB) WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle)))
else edge.AcquireBlock(0.U, paddr >> lgCacheBlockBytes << lgCacheBlockBytes, lgCacheBlockBytes.U, param)._2
}
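// When no downstream manager supports AcquireB (e.g. in scratchpad-only
// configurations), the acquire path is tied off with a zeroed A-channel bundle.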
}
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
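// Example (illustrative parameter values): describe a 64-set, 4-way tag SRAM.
//   val exampleTags = DescribedSRAM(
//     name = "example_tag_array",
//     desc = "Example Tag Array",
//     size = 64,
//     data = Vec(4, UInt(20.W))
//   )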
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
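  // numBeats below: for example, with beatBytes = 8 (cutoff = 3) a PutFullData of
  // size = 5 (32 bytes) gives decode = (1 << 5) >> 3 = 4 beats, while any message
  // without a data payload always counts as a single beat.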
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
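    // 'count' is the zero-based index of the current beat: on the first beat
    // counter is 0, so ~counter1 is all zeros; on later beats counter holds the
    // number of beats remaining, making beats1 & ~counter1 count 0, 1, 2, ...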
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File AMOALU.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) {
val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W))
size := typ
val dat_padded = dat.pad(maxSize*8)
def misaligned: Bool =
(addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR
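  // Worked example for the mask below with maxSize = 8: a 4-byte store
  // (size = 2) at byte offset 4 grows res as 1 -> 0b11 -> 0b1111 -> 0xF0,
  // i.e. byte lanes 7..4 are enabled.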
def mask = {
var res = 1.U
for (i <- 0 until log2Up(maxSize)) {
val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U)
val lower = Mux(addr(i), 0.U, res)
res = Cat(upper, lower)
}
res
}
protected def genData(i: Int): UInt =
if (i >= log2Up(maxSize)) dat_padded
else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1))
def data = genData(0)
def wordData = genData(2)
}
class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) {
private val size = new StoreGen(typ, addr, dat, maxSize).size
private def genData(logMinSize: Int): UInt = {
var res = dat
for (i <- log2Up(maxSize)-1 to logMinSize by -1) {
val pos = 8 << i
val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0))
val doZero = (i == 0).B && zero
val zeroed = Mux(doZero, 0.U, shifted)
res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed)
}
res
}
def wordData = genData(2)
def data = genData(0)
}
class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module {
val minXLen = 32
val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _)
val io = IO(new Bundle {
val mask = Input(UInt((operandBits / 8).W))
val cmd = Input(UInt(M_SZ.W))
val lhs = Input(UInt(operandBits.W))
val rhs = Input(UInt(operandBits.W))
val out = Output(UInt(operandBits.W))
val out_unmasked = Output(UInt(operandBits.W))
})
val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU
val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU
val add = io.cmd === M_XA_ADD
val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND
val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR
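  // M_XA_OR intentionally asserts both selects above: (lhs & rhs) | (lhs ^ rhs)
  // equals lhs | rhs, so OR is formed from the AND and XOR terms in 'logic'
  // below without a third operator.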
val adder_out = {
// partition the carry chain to support sub-xLen addition
val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_)
(io.lhs & mask) + (io.rhs & mask)
}
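  // For operandBits = 64 the partition above reduces to bit 31: when io.mask(3)
  // is clear (the access does not cover byte 3, e.g. a 32-bit AMO in the upper
  // word), bit 31 of both operands is forced to zero, which kills any carry out
  // of the lower word; the corrupted bit 31 of the sum lies outside the byte
  // mask and is discarded by the output masking below.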
val less = {
// break up the comparator so the lower parts will be CSE'd
def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = {
if (n == minXLen) x(n-1, 0) < y(n-1, 0)
else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2)
}
def isLess(x: UInt, y: UInt, n: Int): Bool = {
val signed = {
val mask = M_XA_MIN ^ M_XA_MINU
(io.cmd & mask) === (M_XA_MIN & mask)
}
Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1)))
}
PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w))))
}
val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs)
val logic =
Mux(logic_and, io.lhs & io.rhs, 0.U) |
Mux(logic_xor, io.lhs ^ io.rhs, 0.U)
val out =
Mux(add, adder_out,
Mux(logic_and || logic_xor, logic,
minmax))
val wmask = FillInterleaved(8, io.mask)
io.out := wmask & out | ~wmask & io.lhs
io.out_unmasked := out
}
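// Hypothetical usage sketch (not part of the original file): drives the AMOALU
// above to compute a 32-bit amoadd in the low word of a 64-bit line. The module
// name and the byte-mask literal are illustrative assumptions only.
class AmoAddExample(implicit p: Parameters) extends Module {
  val io = IO(new Bundle {
    val old_data = Input(UInt(64.W))  // value currently held in the cache/memory
    val operand  = Input(UInt(64.W))  // store data, already aligned to offset 0
    val new_data = Output(UInt(64.W)) // value to write back
  })
  val alu = Module(new AMOALU(64))
  alu.io.mask := "h0f".U       // bytes 3..0: a word-sized op at byte offset 0
  alu.io.cmd  := M_XA_ADD
  alu.io.lhs  := io.old_data
  alu.io.rhs  := io.operand
  io.new_data := alu.io.out    // bytes outside the mask keep old_data
}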
module MiniDCache_1( // @[DCache.scala:101:7]
input clock, // @[DCache.scala:101:7]
input reset, // @[DCache.scala:101:7]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_b_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_b_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output io_cpu_req_ready, // @[HellaCache.scala:243:14]
input io_cpu_req_valid, // @[HellaCache.scala:243:14]
input [39:0] io_cpu_req_bits_addr, // @[HellaCache.scala:243:14]
input [7:0] io_cpu_req_bits_tag, // @[HellaCache.scala:243:14]
input [1:0] io_cpu_req_bits_dprv, // @[HellaCache.scala:243:14]
input io_cpu_req_bits_dv, // @[HellaCache.scala:243:14]
input io_cpu_req_bits_phys, // @[HellaCache.scala:243:14]
input io_cpu_s1_kill, // @[HellaCache.scala:243:14]
input [63:0] io_cpu_s1_data_data, // @[HellaCache.scala:243:14]
input [7:0] io_cpu_s1_data_mask, // @[HellaCache.scala:243:14]
output io_cpu_s2_nack, // @[HellaCache.scala:243:14]
output io_cpu_s2_nack_cause_raw, // @[HellaCache.scala:243:14]
output io_cpu_s2_uncached, // @[HellaCache.scala:243:14]
output [31:0] io_cpu_s2_paddr, // @[HellaCache.scala:243:14]
output io_cpu_resp_valid, // @[HellaCache.scala:243:14]
output [39:0] io_cpu_resp_bits_addr, // @[HellaCache.scala:243:14]
output [7:0] io_cpu_resp_bits_tag, // @[HellaCache.scala:243:14]
output [4:0] io_cpu_resp_bits_cmd, // @[HellaCache.scala:243:14]
output [1:0] io_cpu_resp_bits_size, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_signed, // @[HellaCache.scala:243:14]
output [1:0] io_cpu_resp_bits_dprv, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_dv, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_data, // @[HellaCache.scala:243:14]
output [7:0] io_cpu_resp_bits_mask, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_replay, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_has_data, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_data_word_bypass, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_data_raw, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_store_data, // @[HellaCache.scala:243:14]
output io_cpu_replay_next, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ma_ld, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ma_st, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_pf_ld, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_pf_st, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ae_ld, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ae_st, // @[HellaCache.scala:243:14]
output [39:0] io_cpu_s2_gpa, // @[HellaCache.scala:243:14]
output io_cpu_ordered, // @[HellaCache.scala:243:14]
output io_cpu_store_pending, // @[HellaCache.scala:243:14]
output io_cpu_perf_acquire, // @[HellaCache.scala:243:14]
output io_cpu_perf_release, // @[HellaCache.scala:243:14]
output io_cpu_perf_grant, // @[HellaCache.scala:243:14]
output io_cpu_perf_tlbMiss, // @[HellaCache.scala:243:14]
output io_cpu_perf_blocked, // @[HellaCache.scala:243:14]
output io_cpu_perf_canAcceptStoreThenLoad, // @[HellaCache.scala:243:14]
output io_cpu_perf_canAcceptStoreThenRMW, // @[HellaCache.scala:243:14]
output io_cpu_perf_canAcceptLoadThenLoad, // @[HellaCache.scala:243:14]
output io_cpu_perf_storeBufferEmptyAfterLoad, // @[HellaCache.scala:243:14]
output io_cpu_perf_storeBufferEmptyAfterStore, // @[HellaCache.scala:243:14]
input io_ptw_req_ready, // @[HellaCache.scala:243:14]
output io_ptw_req_valid, // @[HellaCache.scala:243:14]
output [26:0] io_ptw_req_bits_bits_addr, // @[HellaCache.scala:243:14]
output io_ptw_req_bits_bits_need_gpa, // @[HellaCache.scala:243:14]
input io_ptw_resp_valid, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_ae_ptw, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_ae_final, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pf, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_gf, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_hr, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_hw, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_hx, // @[HellaCache.scala:243:14]
input [9:0] io_ptw_resp_bits_pte_reserved_for_future, // @[HellaCache.scala:243:14]
input [43:0] io_ptw_resp_bits_pte_ppn, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_resp_bits_pte_reserved_for_software, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_d, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_a, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_g, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_u, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_x, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_w, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_r, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_v, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_resp_bits_level, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_homogeneous, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_gpa_valid, // @[HellaCache.scala:243:14]
input [38:0] io_ptw_resp_bits_gpa_bits, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_gpa_is_pte, // @[HellaCache.scala:243:14]
input [3:0] io_ptw_ptbr_mode, // @[HellaCache.scala:243:14]
input [15:0] io_ptw_ptbr_asid, // @[HellaCache.scala:243:14]
input [43:0] io_ptw_ptbr_ppn, // @[HellaCache.scala:243:14]
input io_ptw_status_debug, // @[HellaCache.scala:243:14]
input io_ptw_status_cease, // @[HellaCache.scala:243:14]
input io_ptw_status_wfi, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_status_isa, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_dprv, // @[HellaCache.scala:243:14]
input io_ptw_status_dv, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_prv, // @[HellaCache.scala:243:14]
input io_ptw_status_v, // @[HellaCache.scala:243:14]
input io_ptw_status_sd, // @[HellaCache.scala:243:14]
input [22:0] io_ptw_status_zero2, // @[HellaCache.scala:243:14]
input io_ptw_status_mpv, // @[HellaCache.scala:243:14]
input io_ptw_status_gva, // @[HellaCache.scala:243:14]
input io_ptw_status_mbe, // @[HellaCache.scala:243:14]
input io_ptw_status_sbe, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_sxl, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_uxl, // @[HellaCache.scala:243:14]
input io_ptw_status_sd_rv32, // @[HellaCache.scala:243:14]
input [7:0] io_ptw_status_zero1, // @[HellaCache.scala:243:14]
input io_ptw_status_tsr, // @[HellaCache.scala:243:14]
input io_ptw_status_tw, // @[HellaCache.scala:243:14]
input io_ptw_status_tvm, // @[HellaCache.scala:243:14]
input io_ptw_status_mxr, // @[HellaCache.scala:243:14]
input io_ptw_status_sum, // @[HellaCache.scala:243:14]
input io_ptw_status_mprv, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_xs, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_fs, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_mpp, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_status_vs, // @[HellaCache.scala:243:14]
input io_ptw_status_spp, // @[HellaCache.scala:243:14]
input io_ptw_status_mpie, // @[HellaCache.scala:243:14]
input io_ptw_status_ube, // @[HellaCache.scala:243:14]
input io_ptw_status_spie, // @[HellaCache.scala:243:14]
input io_ptw_status_upie, // @[HellaCache.scala:243:14]
input io_ptw_status_mie, // @[HellaCache.scala:243:14]
input io_ptw_status_hie, // @[HellaCache.scala:243:14]
input io_ptw_status_sie, // @[HellaCache.scala:243:14]
input io_ptw_status_uie // @[HellaCache.scala:243:14]
);
wire [23:0] s2_meta_corrected_3_tag; // @[DCache.scala:361:99]
wire [1:0] s2_meta_corrected_3_coh_state; // @[DCache.scala:361:99]
wire [63:0] s1_all_data_ways_3; // @[DCache.scala:325:33]
wire [63:0] s1_all_data_ways_2; // @[DCache.scala:325:33]
wire [63:0] s1_all_data_ways_1; // @[DCache.scala:325:33]
wire [63:0] s1_all_data_ways_0; // @[DCache.scala:325:33]
wire rerocc_tile_dcache_tag_array_MPORT_en; // @[DCache.scala:310:27]
wire s0_req_phys; // @[DCache.scala:192:24]
wire [39:0] s0_req_addr; // @[DCache.scala:192:24]
wire tl_out_a_valid; // @[DCache.scala:159:22]
wire [63:0] tl_out_a_bits_data; // @[DCache.scala:159:22]
wire [7:0] tl_out_a_bits_mask; // @[DCache.scala:159:22]
wire [31:0] tl_out_a_bits_address; // @[DCache.scala:159:22]
wire tl_out_a_bits_source; // @[DCache.scala:159:22]
wire [3:0] tl_out_a_bits_size; // @[DCache.scala:159:22]
wire [2:0] tl_out_a_bits_param; // @[DCache.scala:159:22]
wire [2:0] tl_out_a_bits_opcode; // @[DCache.scala:159:22]
wire [1:0] metaArb_io_out_bits_idx; // @[DCache.scala:135:28]
wire metaArb_io_in_0_valid; // @[DCache.scala:135:28]
wire [4:0] pma_checker_io_req_bits_cmd; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_req_bits_size; // @[DCache.scala:120:32]
wire [103:0] _rerocc_tile_dcache_tag_array_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire _lfsr_prng_io_out_0; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_1; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_2; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_3; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_4; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_5; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_6; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_7; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_8; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_9; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_10; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_11; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_12; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_13; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_14; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_15; // @[PRNG.scala:91:22]
wire [19:0] _pma_checker_entries_barrier_12_io_y_ppn; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_12_io_y_hr; // @[package.scala:267:25]
wire [19:0] _pma_checker_entries_barrier_11_io_y_ppn; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_11_io_y_c; // @[package.scala:267:25]
wire [19:0] _pma_checker_entries_barrier_10_io_y_ppn; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_10_io_y_c; // @[package.scala:267:25]
wire [19:0] _pma_checker_entries_barrier_9_io_y_ppn; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_9_io_y_c; // @[package.scala:267:25]
wire [19:0] _pma_checker_entries_barrier_8_io_y_ppn; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_8_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_7_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_6_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_5_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_4_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_3_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_2_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_1_io_y_c; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_u; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_ae_ptw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_ae_final; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_ae_stage2; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_pf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_gf; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_sw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_sx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_sr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_hw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_hx; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_hr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_pw; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_px; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_pr; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_ppp; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_pal; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_paa; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_eff; // @[package.scala:267:25]
wire _pma_checker_entries_barrier_io_y_c; // @[package.scala:267:25]
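  // The wires below carry the PMA (physical memory attribute) check result for the
  // pma_checker's probed address (read/write/atomic permissions, executability,
  // side-effect and cacheability bits), per the TLB.scala:422 instance locator.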
wire _pma_checker_pma_io_resp_r; // @[TLB.scala:422:19]
wire _pma_checker_pma_io_resp_w; // @[TLB.scala:422:19]
wire _pma_checker_pma_io_resp_pp; // @[TLB.scala:422:19]
wire _pma_checker_pma_io_resp_al; // @[TLB.scala:422:19]
wire _pma_checker_pma_io_resp_aa; // @[TLB.scala:422:19]
wire _pma_checker_pma_io_resp_x; // @[TLB.scala:422:19]
wire _pma_checker_pma_io_resp_eff; // @[TLB.scala:422:19]
wire [19:0] _pma_checker_mpu_ppn_barrier_io_y_ppn; // @[package.scala:267:25]
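  // Response outputs of the DCache's internal TLB instance (DCache.scala:119): translated
  // physical address, fault/misalignment flags, and cacheability attributes for the request.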
wire _tlb_io_req_ready; // @[DCache.scala:119:19]
wire _tlb_io_resp_miss; // @[DCache.scala:119:19]
wire [31:0] _tlb_io_resp_paddr; // @[DCache.scala:119:19]
wire [39:0] _tlb_io_resp_gpa; // @[DCache.scala:119:19]
wire _tlb_io_resp_pf_ld; // @[DCache.scala:119:19]
wire _tlb_io_resp_pf_st; // @[DCache.scala:119:19]
wire _tlb_io_resp_pf_inst; // @[DCache.scala:119:19]
wire _tlb_io_resp_ae_ld; // @[DCache.scala:119:19]
wire _tlb_io_resp_ae_st; // @[DCache.scala:119:19]
wire _tlb_io_resp_ae_inst; // @[DCache.scala:119:19]
wire _tlb_io_resp_ma_ld; // @[DCache.scala:119:19]
wire _tlb_io_resp_ma_st; // @[DCache.scala:119:19]
wire _tlb_io_resp_cacheable; // @[DCache.scala:119:19]
wire _tlb_io_resp_must_alloc; // @[DCache.scala:119:19]
wire _tlb_io_resp_prefetchable; // @[DCache.scala:119:19]
wire [1:0] _tlb_io_resp_size; // @[DCache.scala:119:19]
wire [4:0] _tlb_io_resp_cmd; // @[DCache.scala:119:19]
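  // Input-port aliases: the generated code re-declares each module input as an internal
  // `*_0` wire (TileLink auto_out channels, CPU request, and PTW response/status fields)
  // before it is used inside the module body.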
wire auto_out_a_ready_0 = auto_out_a_ready; // @[DCache.scala:101:7]
wire auto_out_b_valid_0 = auto_out_b_valid; // @[DCache.scala:101:7]
wire [2:0] auto_out_b_bits_opcode_0 = auto_out_b_bits_opcode; // @[DCache.scala:101:7]
wire [1:0] auto_out_b_bits_param_0 = auto_out_b_bits_param; // @[DCache.scala:101:7]
wire [3:0] auto_out_b_bits_size_0 = auto_out_b_bits_size; // @[DCache.scala:101:7]
wire auto_out_b_bits_source_0 = auto_out_b_bits_source; // @[DCache.scala:101:7]
wire [31:0] auto_out_b_bits_address_0 = auto_out_b_bits_address; // @[DCache.scala:101:7]
wire [7:0] auto_out_b_bits_mask_0 = auto_out_b_bits_mask; // @[DCache.scala:101:7]
wire [63:0] auto_out_b_bits_data_0 = auto_out_b_bits_data; // @[DCache.scala:101:7]
wire auto_out_b_bits_corrupt_0 = auto_out_b_bits_corrupt; // @[DCache.scala:101:7]
wire auto_out_c_ready_0 = auto_out_c_ready; // @[DCache.scala:101:7]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[DCache.scala:101:7]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[DCache.scala:101:7]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[DCache.scala:101:7]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[DCache.scala:101:7]
wire auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[DCache.scala:101:7]
wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[DCache.scala:101:7]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[DCache.scala:101:7]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[DCache.scala:101:7]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[DCache.scala:101:7]
wire auto_out_e_ready_0 = auto_out_e_ready; // @[DCache.scala:101:7]
wire io_cpu_req_valid_0 = io_cpu_req_valid; // @[DCache.scala:101:7]
wire [39:0] io_cpu_req_bits_addr_0 = io_cpu_req_bits_addr; // @[DCache.scala:101:7]
wire [7:0] io_cpu_req_bits_tag_0 = io_cpu_req_bits_tag; // @[DCache.scala:101:7]
wire [1:0] io_cpu_req_bits_dprv_0 = io_cpu_req_bits_dprv; // @[DCache.scala:101:7]
wire io_cpu_req_bits_dv_0 = io_cpu_req_bits_dv; // @[DCache.scala:101:7]
wire io_cpu_req_bits_phys_0 = io_cpu_req_bits_phys; // @[DCache.scala:101:7]
wire io_cpu_s1_kill_0 = io_cpu_s1_kill; // @[DCache.scala:101:7]
wire [63:0] io_cpu_s1_data_data_0 = io_cpu_s1_data_data; // @[DCache.scala:101:7]
wire [7:0] io_cpu_s1_data_mask_0 = io_cpu_s1_data_mask; // @[DCache.scala:101:7]
wire io_ptw_req_ready_0 = io_ptw_req_ready; // @[DCache.scala:101:7]
wire io_ptw_resp_valid_0 = io_ptw_resp_valid; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_ae_ptw_0 = io_ptw_resp_bits_ae_ptw; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_ae_final_0 = io_ptw_resp_bits_ae_final; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pf_0 = io_ptw_resp_bits_pf; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_gf_0 = io_ptw_resp_bits_gf; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_hr_0 = io_ptw_resp_bits_hr; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_hw_0 = io_ptw_resp_bits_hw; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_hx_0 = io_ptw_resp_bits_hx; // @[DCache.scala:101:7]
wire [9:0] io_ptw_resp_bits_pte_reserved_for_future_0 = io_ptw_resp_bits_pte_reserved_for_future; // @[DCache.scala:101:7]
wire [43:0] io_ptw_resp_bits_pte_ppn_0 = io_ptw_resp_bits_pte_ppn; // @[DCache.scala:101:7]
wire [1:0] io_ptw_resp_bits_pte_reserved_for_software_0 = io_ptw_resp_bits_pte_reserved_for_software; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_d_0 = io_ptw_resp_bits_pte_d; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_a_0 = io_ptw_resp_bits_pte_a; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_g_0 = io_ptw_resp_bits_pte_g; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_u_0 = io_ptw_resp_bits_pte_u; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_x_0 = io_ptw_resp_bits_pte_x; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_w_0 = io_ptw_resp_bits_pte_w; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_r_0 = io_ptw_resp_bits_pte_r; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_pte_v_0 = io_ptw_resp_bits_pte_v; // @[DCache.scala:101:7]
wire [1:0] io_ptw_resp_bits_level_0 = io_ptw_resp_bits_level; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_homogeneous_0 = io_ptw_resp_bits_homogeneous; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_gpa_valid_0 = io_ptw_resp_bits_gpa_valid; // @[DCache.scala:101:7]
wire [38:0] io_ptw_resp_bits_gpa_bits_0 = io_ptw_resp_bits_gpa_bits; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_gpa_is_pte_0 = io_ptw_resp_bits_gpa_is_pte; // @[DCache.scala:101:7]
wire [3:0] io_ptw_ptbr_mode_0 = io_ptw_ptbr_mode; // @[DCache.scala:101:7]
wire [15:0] io_ptw_ptbr_asid_0 = io_ptw_ptbr_asid; // @[DCache.scala:101:7]
wire [43:0] io_ptw_ptbr_ppn_0 = io_ptw_ptbr_ppn; // @[DCache.scala:101:7]
wire io_ptw_status_debug_0 = io_ptw_status_debug; // @[DCache.scala:101:7]
wire io_ptw_status_cease_0 = io_ptw_status_cease; // @[DCache.scala:101:7]
wire io_ptw_status_wfi_0 = io_ptw_status_wfi; // @[DCache.scala:101:7]
wire [31:0] io_ptw_status_isa_0 = io_ptw_status_isa; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_dprv_0 = io_ptw_status_dprv; // @[DCache.scala:101:7]
wire io_ptw_status_dv_0 = io_ptw_status_dv; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_prv_0 = io_ptw_status_prv; // @[DCache.scala:101:7]
wire io_ptw_status_v_0 = io_ptw_status_v; // @[DCache.scala:101:7]
wire io_ptw_status_sd_0 = io_ptw_status_sd; // @[DCache.scala:101:7]
wire [22:0] io_ptw_status_zero2_0 = io_ptw_status_zero2; // @[DCache.scala:101:7]
wire io_ptw_status_mpv_0 = io_ptw_status_mpv; // @[DCache.scala:101:7]
wire io_ptw_status_gva_0 = io_ptw_status_gva; // @[DCache.scala:101:7]
wire io_ptw_status_mbe_0 = io_ptw_status_mbe; // @[DCache.scala:101:7]
wire io_ptw_status_sbe_0 = io_ptw_status_sbe; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_sxl_0 = io_ptw_status_sxl; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_uxl_0 = io_ptw_status_uxl; // @[DCache.scala:101:7]
wire io_ptw_status_sd_rv32_0 = io_ptw_status_sd_rv32; // @[DCache.scala:101:7]
wire [7:0] io_ptw_status_zero1_0 = io_ptw_status_zero1; // @[DCache.scala:101:7]
wire io_ptw_status_tsr_0 = io_ptw_status_tsr; // @[DCache.scala:101:7]
wire io_ptw_status_tw_0 = io_ptw_status_tw; // @[DCache.scala:101:7]
wire io_ptw_status_tvm_0 = io_ptw_status_tvm; // @[DCache.scala:101:7]
wire io_ptw_status_mxr_0 = io_ptw_status_mxr; // @[DCache.scala:101:7]
wire io_ptw_status_sum_0 = io_ptw_status_sum; // @[DCache.scala:101:7]
wire io_ptw_status_mprv_0 = io_ptw_status_mprv; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_xs_0 = io_ptw_status_xs; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_fs_0 = io_ptw_status_fs; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_mpp_0 = io_ptw_status_mpp; // @[DCache.scala:101:7]
wire [1:0] io_ptw_status_vs_0 = io_ptw_status_vs; // @[DCache.scala:101:7]
wire io_ptw_status_spp_0 = io_ptw_status_spp; // @[DCache.scala:101:7]
wire io_ptw_status_mpie_0 = io_ptw_status_mpie; // @[DCache.scala:101:7]
wire io_ptw_status_ube_0 = io_ptw_status_ube; // @[DCache.scala:101:7]
wire io_ptw_status_spie_0 = io_ptw_status_spie; // @[DCache.scala:101:7]
wire io_ptw_status_upie_0 = io_ptw_status_upie; // @[DCache.scala:101:7]
wire io_ptw_status_mie_0 = io_ptw_status_mie; // @[DCache.scala:101:7]
wire io_ptw_status_hie_0 = io_ptw_status_hie; // @[DCache.scala:101:7]
wire io_ptw_status_sie_0 = io_ptw_status_sie; // @[DCache.scala:101:7]
wire io_ptw_status_uie_0 = io_ptw_status_uie; // @[DCache.scala:101:7]
wire _dataArb_io_in_3_valid_T_55 = reset; // @[DCache.scala:1186:11]
wire _pstore_drain_opportunistic_T_55 = reset; // @[DCache.scala:1186:11]
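  // Constant tie-offs emitted by elaboration: the nets below (5'h0, 2'h3, 1'h0) correspond to
  // unused or hard-wired signals, e.g. the idle pma_checker TLB port and disabled
  // hypervisor/G-stage status inputs, which are fixed at constant values in this configuration.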
wire [4:0] io_cpu_req_bits_cmd = 5'h0; // @[DCache.scala:101:7]
wire [4:0] io_ptw_hstatus_zero1 = 5'h0; // @[DCache.scala:101:7]
wire [4:0] io_tlb_port_req_bits_cmd = 5'h0; // @[DCache.scala:101:7]
wire [4:0] pma_checker_io_ptw_hstatus_zero1 = 5'h0; // @[DCache.scala:120:32]
wire [4:0] s0_req_cmd = 5'h0; // @[DCache.scala:192:24]
wire [4:0] s0_tlb_req_cmd = 5'h0; // @[DCache.scala:199:28]
wire [4:0] _io_cpu_s2_xcpt_WIRE_cmd = 5'h0; // @[DCache.scala:933:74]
wire [1:0] io_cpu_req_bits_size = 2'h3; // @[DCache.scala:101:7]
wire [1:0] s0_req_size = 2'h3; // @[DCache.scala:192:24]
wire [1:0] s0_tlb_req_size = 2'h3; // @[DCache.scala:199:28]
wire [1:0] _r_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] tl_out_a_bits_a_mask_lo_lo = 2'h3; // @[Misc.scala:222:10]
wire [1:0] tl_out_a_bits_a_mask_lo_hi = 2'h3; // @[Misc.scala:222:10]
wire [1:0] tl_out_a_bits_a_mask_hi_lo = 2'h3; // @[Misc.scala:222:10]
wire [1:0] tl_out_a_bits_a_mask_hi_hi = 2'h3; // @[Misc.scala:222:10]
wire [1:0] _metaArb_io_in_3_bits_data_T_8 = 2'h3; // @[Metadata.scala:24:15]
wire auto_out_a_bits_corrupt = 1'h0; // @[DCache.scala:101:7]
wire auto_out_c_bits_corrupt = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_req_bits_signed = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_req_bits_no_resp = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_req_bits_no_alloc = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_req_bits_no_xcpt = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_s2_kill = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_gf_ld = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_gf_st = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_s2_gpa_is_pte = 1'h0; // @[DCache.scala:101:7]
wire io_cpu_keep_clock_enabled = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_req_bits_bits_vstage1 = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_req_bits_bits_stage2 = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_resp_bits_fragmented_superpage = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_vtsr = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_vtw = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_vtvm = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_hu = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_spvp = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_spv = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_gva = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_hstatus_vsbe = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_debug = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_cease = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_wfi = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_dv = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_v = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_sd = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_mpv = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_gva = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_mbe = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_sbe = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_sd_rv32 = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_tsr = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_tw = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_tvm = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_mxr = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_sum = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_mprv = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_spp = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_mpie = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_ube = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_spie = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_upie = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_mie = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_hie = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_sie = 1'h0; // @[DCache.scala:101:7]
wire io_ptw_gstatus_uie = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_req_valid = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_req_bits_passthrough = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_req_bits_v = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_gpa_is_pte = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_gf_ld = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_gf_st = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_gf_inst = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_ma_inst = 1'h0; // @[DCache.scala:101:7]
wire io_tlb_port_s2_kill = 1'h0; // @[DCache.scala:101:7]
wire nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire nodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire pma_checker_io_req_valid = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_resp_miss = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_resp_gpa_is_pte = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_resp_gf_ld = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_resp_gf_st = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_resp_gf_inst = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_resp_ma_inst = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_sfence_valid = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_sfence_bits_rs1 = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_sfence_bits_rs2 = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_sfence_bits_asid = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_sfence_bits_hv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_sfence_bits_hg = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_req_ready = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_req_valid = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_req_bits_bits_need_gpa = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_req_bits_bits_vstage1 = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_req_bits_bits_stage2 = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_valid = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_ae_ptw = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_ae_final = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pf = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_gf = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_hr = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_hw = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_hx = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_d = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_a = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_g = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_u = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_x = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_w = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_r = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_pte_v = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_fragmented_superpage = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_homogeneous = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_gpa_valid = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_resp_bits_gpa_is_pte = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_debug = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_cease = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_wfi = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_dv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_v = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_sd = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_mpv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_gva = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_mbe = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_sbe = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_sd_rv32 = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_tsr = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_tw = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_tvm = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_mxr = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_sum = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_mprv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_spp = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_mpie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_ube = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_spie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_upie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_mie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_hie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_sie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_status_uie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_vtsr = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_vtw = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_vtvm = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_hu = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_spvp = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_spv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_gva = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_hstatus_vsbe = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_debug = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_cease = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_wfi = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_dv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_v = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_sd = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_mpv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_gva = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_mbe = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_sbe = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_sd_rv32 = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_tsr = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_tw = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_tvm = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_mxr = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_sum = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_mprv = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_spp = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_mpie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_ube = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_spie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_upie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_mie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_hie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_sie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_gstatus_uie = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_io_kill = 1'h0; // @[DCache.scala:120:32]
wire pma_checker_priv_v = 1'h0; // @[TLB.scala:369:34]
wire pma_checker__stage1_en_T = 1'h0; // @[TLB.scala:374:41]
wire pma_checker_stage1_en = 1'h0; // @[TLB.scala:374:29]
wire pma_checker__vstage1_en_T = 1'h0; // @[TLB.scala:376:38]
wire pma_checker__vstage1_en_T_1 = 1'h0; // @[TLB.scala:376:68]
wire pma_checker_vstage1_en = 1'h0; // @[TLB.scala:376:48]
wire pma_checker__stage2_en_T = 1'h0; // @[TLB.scala:378:38]
wire pma_checker__stage2_en_T_1 = 1'h0; // @[TLB.scala:378:68]
wire pma_checker_stage2_en = 1'h0; // @[TLB.scala:378:48]
wire pma_checker__vm_enabled_T = 1'h0; // @[TLB.scala:399:31]
wire pma_checker__vm_enabled_T_1 = 1'h0; // @[TLB.scala:399:45]
wire pma_checker__vm_enabled_T_2 = 1'h0; // @[TLB.scala:399:64]
wire pma_checker_vm_enabled = 1'h0; // @[TLB.scala:399:61]
wire pma_checker__vsatp_mode_mismatch_T = 1'h0; // @[TLB.scala:403:52]
wire pma_checker__vsatp_mode_mismatch_T_1 = 1'h0; // @[TLB.scala:403:37]
wire pma_checker__vsatp_mode_mismatch_T_2 = 1'h0; // @[TLB.scala:403:81]
wire pma_checker_vsatp_mode_mismatch = 1'h0; // @[TLB.scala:403:78]
wire pma_checker_do_refill = 1'h0; // @[TLB.scala:408:29]
wire pma_checker__invalidate_refill_T = 1'h0; // @[package.scala:16:47]
wire pma_checker__invalidate_refill_T_1 = 1'h0; // @[package.scala:16:47]
wire pma_checker__invalidate_refill_T_2 = 1'h0; // @[package.scala:81:59]
wire pma_checker_invalidate_refill = 1'h0; // @[TLB.scala:410:88]
wire pma_checker__mpu_ppn_T = 1'h0; // @[TLB.scala:413:32]
wire pma_checker__sector_hits_T = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_1 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_2 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_0 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_8 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_9 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_10 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_1 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_16 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_17 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_18 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_2 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_24 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_25 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_26 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_3 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_32 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_33 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_34 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_4 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_40 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_41 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_42 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_5 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_48 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_49 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_50 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_6 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker__sector_hits_T_56 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_57 = 1'h0; // @[package.scala:81:59]
wire pma_checker__sector_hits_T_58 = 1'h0; // @[package.scala:81:59]
wire pma_checker_sector_hits_7 = 1'h0; // @[TLB.scala:172:55]
wire pma_checker_superpage_hits_tagMatch = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__superpage_hits_ignore_T = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_4 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__superpage_hits_T_9 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_0 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_tagMatch_1 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__superpage_hits_ignore_T_3 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_3 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_18 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__superpage_hits_T_23 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_1 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_tagMatch_2 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__superpage_hits_ignore_T_6 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_6 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_32 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__superpage_hits_T_37 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_2 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_tagMatch_3 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__superpage_hits_ignore_T_9 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_9 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_46 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__superpage_hits_T_51 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_superpage_hits_3 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_5 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_0 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_11 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_1 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_17 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_2 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_23 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_3 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_29 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_4 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_35 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_5 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_41 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_6 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker__hitsVec_T_47 = 1'h0; // @[TLB.scala:188:18]
wire pma_checker_hitsVec_7 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker_hitsVec_tagMatch = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__hitsVec_ignore_T = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_52 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_57 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_62 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_hitsVec_8 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker_hitsVec_tagMatch_1 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__hitsVec_ignore_T_3 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_3 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_67 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_72 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_77 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_hitsVec_9 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker_hitsVec_tagMatch_2 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__hitsVec_ignore_T_6 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_6 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_82 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_87 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_92 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_hitsVec_10 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker_hitsVec_tagMatch_3 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__hitsVec_ignore_T_9 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_9 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_97 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_102 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_107 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_hitsVec_11 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker_hitsVec_tagMatch_4 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__hitsVec_ignore_T_12 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_12 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_112 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_117 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker__hitsVec_T_122 = 1'h0; // @[TLB.scala:183:29]
wire pma_checker_hitsVec_12 = 1'h0; // @[TLB.scala:440:44]
wire pma_checker_refill_v = 1'h0; // @[TLB.scala:448:33]
wire pma_checker_newEntry_u = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_g = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_ae_ptw = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_ae_final = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_ae_stage2 = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_pf = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_gf = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_sw = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_sx = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_sr = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_hw = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_hx = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_hr = 1'h0; // @[TLB.scala:449:24]
wire pma_checker_newEntry_fragmented_superpage = 1'h0; // @[TLB.scala:449:24]
wire pma_checker__newEntry_g_T = 1'h0; // @[TLB.scala:453:25]
wire pma_checker__newEntry_ae_stage2_T = 1'h0; // @[TLB.scala:456:53]
wire pma_checker__newEntry_ae_stage2_T_1 = 1'h0; // @[TLB.scala:456:84]
wire pma_checker__newEntry_sr_T_1 = 1'h0; // @[PTW.scala:141:44]
wire pma_checker__newEntry_sr_T_2 = 1'h0; // @[PTW.scala:141:38]
wire pma_checker__newEntry_sr_T_3 = 1'h0; // @[PTW.scala:141:32]
wire pma_checker__newEntry_sr_T_4 = 1'h0; // @[PTW.scala:141:52]
wire pma_checker__newEntry_sr_T_5 = 1'h0; // @[PTW.scala:149:35]
wire pma_checker__newEntry_sw_T_1 = 1'h0; // @[PTW.scala:141:44]
wire pma_checker__newEntry_sw_T_2 = 1'h0; // @[PTW.scala:141:38]
wire pma_checker__newEntry_sw_T_3 = 1'h0; // @[PTW.scala:141:32]
wire pma_checker__newEntry_sw_T_4 = 1'h0; // @[PTW.scala:141:52]
wire pma_checker__newEntry_sw_T_5 = 1'h0; // @[PTW.scala:151:35]
wire pma_checker__newEntry_sw_T_6 = 1'h0; // @[PTW.scala:151:40]
wire pma_checker__newEntry_sx_T_1 = 1'h0; // @[PTW.scala:141:44]
wire pma_checker__newEntry_sx_T_2 = 1'h0; // @[PTW.scala:141:38]
wire pma_checker__newEntry_sx_T_3 = 1'h0; // @[PTW.scala:141:32]
wire pma_checker__newEntry_sx_T_4 = 1'h0; // @[PTW.scala:141:52]
wire pma_checker__newEntry_sx_T_5 = 1'h0; // @[PTW.scala:153:35]
wire pma_checker__waddr_T = 1'h0; // @[TLB.scala:477:45]
wire pma_checker__superpage_entries_0_level_T = 1'h0; // @[package.scala:163:13]
wire pma_checker__superpage_entries_1_level_T = 1'h0; // @[package.scala:163:13]
wire pma_checker__superpage_entries_2_level_T = 1'h0; // @[package.scala:163:13]
wire pma_checker__superpage_entries_3_level_T = 1'h0; // @[package.scala:163:13]
wire pma_checker_sum = 1'h0; // @[TLB.scala:510:16]
wire pma_checker__mxr_T = 1'h0; // @[TLB.scala:518:36]
wire pma_checker_mxr = 1'h0; // @[TLB.scala:518:31]
wire pma_checker__bad_va_T = 1'h0; // @[TLB.scala:568:21]
wire pma_checker_bad_va = 1'h0; // @[TLB.scala:568:34]
wire pma_checker_cmd_lrsc = 1'h0; // @[TLB.scala:570:33]
wire pma_checker_cmd_amo_logical = 1'h0; // @[TLB.scala:571:40]
wire pma_checker_cmd_amo_arithmetic = 1'h0; // @[TLB.scala:572:43]
wire pma_checker_cmd_readx = 1'h0; // @[TLB.scala:575:37]
wire pma_checker__gf_ld_array_T = 1'h0; // @[TLB.scala:600:32]
wire pma_checker__gf_st_array_T = 1'h0; // @[TLB.scala:601:32]
wire pma_checker__gpa_hits_hit_mask_T_1 = 1'h0; // @[TLB.scala:606:60]
wire pma_checker_tlb_hit_if_not_gpa_miss = 1'h0; // @[TLB.scala:610:43]
wire pma_checker_tlb_hit = 1'h0; // @[TLB.scala:611:40]
wire pma_checker__tlb_miss_T_1 = 1'h0; // @[TLB.scala:613:29]
wire pma_checker__tlb_miss_T_3 = 1'h0; // @[TLB.scala:613:53]
wire pma_checker_tlb_miss = 1'h0; // @[TLB.scala:613:64]
wire pma_checker__state_vec_0_set_left_older_T = 1'h0; // @[Replacement.scala:196:43]
wire pma_checker__state_vec_0_set_left_older_T_1 = 1'h0; // @[Replacement.scala:196:43]
wire pma_checker_state_vec_0_left_subtree_state_1 = 1'h0; // @[package.scala:163:13]
wire pma_checker_state_vec_0_right_subtree_state_1 = 1'h0; // @[Replacement.scala:198:38]
wire pma_checker__state_vec_0_T_1 = 1'h0; // @[package.scala:163:13]
wire pma_checker__state_vec_0_T_2 = 1'h0; // @[Replacement.scala:218:17]
wire pma_checker__state_vec_0_T_4 = 1'h0; // @[Replacement.scala:203:16]
wire pma_checker__state_vec_0_T_5 = 1'h0; // @[Replacement.scala:207:62]
wire pma_checker__state_vec_0_T_6 = 1'h0; // @[Replacement.scala:218:17]
wire pma_checker__state_vec_0_set_left_older_T_2 = 1'h0; // @[Replacement.scala:196:43]
wire pma_checker_state_vec_0_left_subtree_state_2 = 1'h0; // @[package.scala:163:13]
wire pma_checker_state_vec_0_right_subtree_state_2 = 1'h0; // @[Replacement.scala:198:38]
wire pma_checker__state_vec_0_T_12 = 1'h0; // @[package.scala:163:13]
wire pma_checker__state_vec_0_T_13 = 1'h0; // @[Replacement.scala:218:17]
wire pma_checker__state_vec_0_T_15 = 1'h0; // @[Replacement.scala:203:16]
wire pma_checker__state_vec_0_T_16 = 1'h0; // @[Replacement.scala:207:62]
wire pma_checker__state_vec_0_T_17 = 1'h0; // @[Replacement.scala:218:17]
wire pma_checker__state_reg_set_left_older_T = 1'h0; // @[Replacement.scala:196:43]
wire pma_checker_state_reg_left_subtree_state = 1'h0; // @[package.scala:163:13]
wire pma_checker_state_reg_right_subtree_state = 1'h0; // @[Replacement.scala:198:38]
wire pma_checker__state_reg_T = 1'h0; // @[package.scala:163:13]
wire pma_checker__state_reg_T_1 = 1'h0; // @[Replacement.scala:218:17]
wire pma_checker__state_reg_T_3 = 1'h0; // @[Replacement.scala:203:16]
wire pma_checker__state_reg_T_4 = 1'h0; // @[Replacement.scala:207:62]
wire pma_checker__state_reg_T_5 = 1'h0; // @[Replacement.scala:218:17]
wire pma_checker__multipleHits_T_2 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_4 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_1 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_5 = 1'h0; // @[Misc.scala:182:39]
wire pma_checker_multipleHits_rightOne = 1'h0; // @[Misc.scala:178:18]
wire pma_checker_multipleHits_rightOne_1 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_6 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_7 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo = 1'h0; // @[Misc.scala:183:49]
wire pma_checker_multipleHits_leftOne_2 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_8 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_9 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_leftTwo = 1'h0; // @[Misc.scala:183:49]
wire pma_checker__multipleHits_T_11 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_3 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_13 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_4 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_14 = 1'h0; // @[Misc.scala:182:39]
wire pma_checker_multipleHits_rightOne_2 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker_multipleHits_rightOne_3 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_15 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_16 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo_1 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker_multipleHits_rightOne_4 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_17 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_18 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo_2 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker_multipleHits_leftOne_5 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_19 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_20 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_leftTwo_1 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker__multipleHits_T_23 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_6 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_25 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_7 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_26 = 1'h0; // @[Misc.scala:182:39]
wire pma_checker_multipleHits_rightOne_5 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker_multipleHits_rightOne_6 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_27 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_28 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo_3 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker_multipleHits_leftOne_8 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_29 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_30 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_leftTwo_2 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker__multipleHits_T_33 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_9 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_34 = 1'h0; // @[Misc.scala:182:39]
wire pma_checker_multipleHits_rightOne_7 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker_multipleHits_leftOne_10 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_35 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_36 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_leftTwo_3 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker__multipleHits_T_38 = 1'h0; // @[Misc.scala:181:37]
wire pma_checker_multipleHits_leftOne_11 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker__multipleHits_T_39 = 1'h0; // @[Misc.scala:182:39]
wire pma_checker_multipleHits_rightOne_8 = 1'h0; // @[Misc.scala:178:18]
wire pma_checker_multipleHits_rightOne_9 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_40 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_41 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo_4 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker_multipleHits_rightOne_10 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_42 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_43 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo_5 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker_multipleHits_rightOne_11 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_44 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_45 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits_rightTwo_6 = 1'h0; // @[Misc.scala:183:49]
wire pma_checker__multipleHits_T_46 = 1'h0; // @[Misc.scala:183:16]
wire pma_checker__multipleHits_T_47 = 1'h0; // @[Misc.scala:183:37]
wire pma_checker__multipleHits_T_48 = 1'h0; // @[Misc.scala:183:61]
wire pma_checker_multipleHits = 1'h0; // @[Misc.scala:183:49]
wire pma_checker__io_resp_pf_ld_T = 1'h0; // @[TLB.scala:633:28]
wire pma_checker__io_resp_pf_st_T = 1'h0; // @[TLB.scala:634:28]
wire pma_checker__io_resp_gf_ld_T = 1'h0; // @[TLB.scala:637:29]
wire pma_checker__io_resp_gf_ld_T_2 = 1'h0; // @[TLB.scala:637:66]
wire pma_checker__io_resp_gf_ld_T_3 = 1'h0; // @[TLB.scala:637:42]
wire pma_checker__io_resp_gf_st_T = 1'h0; // @[TLB.scala:638:29]
wire pma_checker__io_resp_gf_st_T_2 = 1'h0; // @[TLB.scala:638:73]
wire pma_checker__io_resp_gf_st_T_3 = 1'h0; // @[TLB.scala:638:49]
wire pma_checker__io_resp_gf_inst_T_1 = 1'h0; // @[TLB.scala:639:56]
wire pma_checker__io_resp_gf_inst_T_2 = 1'h0; // @[TLB.scala:639:30]
wire pma_checker__io_resp_miss_T = 1'h0; // @[TLB.scala:651:29]
wire pma_checker__io_resp_miss_T_1 = 1'h0; // @[TLB.scala:651:52]
wire pma_checker__io_resp_miss_T_2 = 1'h0; // @[TLB.scala:651:64]
wire pma_checker__io_resp_gpa_is_pte_T = 1'h0; // @[TLB.scala:655:36]
wire pma_checker__io_ptw_req_valid_T = 1'h0; // @[TLB.scala:662:29]
wire pma_checker_r_superpage_repl_addr_left_subtree_older = 1'h0; // @[Replacement.scala:243:38]
wire pma_checker_r_superpage_repl_addr_left_subtree_state = 1'h0; // @[package.scala:163:13]
wire pma_checker_r_superpage_repl_addr_right_subtree_state = 1'h0; // @[Replacement.scala:245:38]
wire pma_checker__r_superpage_repl_addr_T = 1'h0; // @[Replacement.scala:262:12]
wire pma_checker__r_superpage_repl_addr_T_1 = 1'h0; // @[Replacement.scala:262:12]
wire pma_checker__r_superpage_repl_addr_T_2 = 1'h0; // @[Replacement.scala:250:16]
wire pma_checker__r_superpage_repl_addr_T_4 = 1'h0; // @[TLB.scala:757:16]
wire pma_checker_r_sectored_repl_addr_left_subtree_older = 1'h0; // @[Replacement.scala:243:38]
wire pma_checker_r_sectored_repl_addr_left_subtree_older_1 = 1'h0; // @[Replacement.scala:243:38]
wire pma_checker_r_sectored_repl_addr_left_subtree_state_1 = 1'h0; // @[package.scala:163:13]
wire pma_checker_r_sectored_repl_addr_right_subtree_state_1 = 1'h0; // @[Replacement.scala:245:38]
wire pma_checker__r_sectored_repl_addr_T = 1'h0; // @[Replacement.scala:262:12]
wire pma_checker__r_sectored_repl_addr_T_1 = 1'h0; // @[Replacement.scala:262:12]
wire pma_checker__r_sectored_repl_addr_T_2 = 1'h0; // @[Replacement.scala:250:16]
wire pma_checker_r_sectored_repl_addr_left_subtree_older_2 = 1'h0; // @[Replacement.scala:243:38]
wire pma_checker_r_sectored_repl_addr_left_subtree_state_2 = 1'h0; // @[package.scala:163:13]
wire pma_checker_r_sectored_repl_addr_right_subtree_state_2 = 1'h0; // @[Replacement.scala:245:38]
wire pma_checker__r_sectored_repl_addr_T_4 = 1'h0; // @[Replacement.scala:262:12]
wire pma_checker__r_sectored_repl_addr_T_5 = 1'h0; // @[Replacement.scala:262:12]
wire pma_checker__r_sectored_repl_addr_T_6 = 1'h0; // @[Replacement.scala:250:16]
wire pma_checker__r_sectored_repl_addr_valids_T = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_1 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_2 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_3 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_4 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_5 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_6 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_7 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_8 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_9 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_10 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_11 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_12 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_13 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_14 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_15 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_16 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_17 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_18 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_19 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_20 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_21 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_22 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_valids_T_23 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_repl_addr_T_10 = 1'h0; // @[TLB.scala:757:16]
wire pma_checker__r_sectored_hit_valid_T = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_valid_T_1 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_valid_T_2 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_valid_T_3 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_valid_T_4 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_valid_T_5 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_valid_T_6 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_sectored_hit_bits_T_1 = 1'h0; // @[OneHot.scala:32:14]
wire pma_checker__r_sectored_hit_bits_T_3 = 1'h0; // @[OneHot.scala:32:14]
wire pma_checker__r_sectored_hit_bits_T_5 = 1'h0; // @[CircuitMath.scala:28:8]
wire pma_checker__r_superpage_hit_valid_T = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_superpage_hit_valid_T_1 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_superpage_hit_valid_T_2 = 1'h0; // @[package.scala:81:59]
wire pma_checker__r_superpage_hit_bits_T_1 = 1'h0; // @[OneHot.scala:32:14]
wire pma_checker__r_superpage_hit_bits_T_3 = 1'h0; // @[CircuitMath.scala:28:8]
wire pma_checker_hv = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_1 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_1 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_2 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_2 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_3 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_3 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_4 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_4 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_5 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_5 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_6 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_6 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_7 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_7 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_hv_8 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_8 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_tagMatch = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__ignore_T = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_ignore = 1'h0; // @[TLB.scala:182:34]
wire pma_checker_hv_9 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_9 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_tagMatch_1 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__ignore_T_3 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_ignore_3 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker_hv_10 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_10 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_tagMatch_2 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__ignore_T_6 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_ignore_6 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker_hv_11 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_11 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_tagMatch_3 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__ignore_T_9 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_ignore_9 = 1'h0; // @[TLB.scala:182:34]
wire pma_checker_hv_12 = 1'h0; // @[TLB.scala:721:36]
wire pma_checker_hg_12 = 1'h0; // @[TLB.scala:722:36]
wire pma_checker_tagMatch_4 = 1'h0; // @[TLB.scala:178:33]
wire pma_checker__ignore_T_12 = 1'h0; // @[TLB.scala:182:28]
wire pma_checker_ignore_12 = 1'h0; // @[TLB.scala:182:34]
wire metaArb_io_in_1_valid = 1'h0; // @[DCache.scala:135:28]
wire metaArb_io_in_5_valid = 1'h0; // @[DCache.scala:135:28]
wire metaArb_io_in_5_bits_write = 1'h0; // @[DCache.scala:135:28]
wire metaArb_io_in_6_bits_write = 1'h0; // @[DCache.scala:135:28]
wire metaArb_io_in_7_bits_write = 1'h0; // @[DCache.scala:135:28]
wire dataArb_io_in_2_bits_write = 1'h0; // @[DCache.scala:152:28]
wire dataArb_io_in_3_bits_write = 1'h0; // @[DCache.scala:152:28]
wire tl_out_a_bits_corrupt = 1'h0; // @[DCache.scala:159:22]
wire nodeOut_a_deq_bits_corrupt = 1'h0; // @[Decoupled.scala:356:21]
wire _s1_tlb_req_valid_T = 1'h0; // @[Decoupled.scala:51:35]
wire s0_req_signed = 1'h0; // @[DCache.scala:192:24]
wire s0_req_no_resp = 1'h0; // @[DCache.scala:192:24]
wire s0_req_no_alloc = 1'h0; // @[DCache.scala:192:24]
wire s0_req_no_xcpt = 1'h0; // @[DCache.scala:192:24]
wire s1_waw_hazard = 1'h0; // @[DCache.scala:216:27]
wire _uncachedInFlight_WIRE_0 = 1'h0; // @[DCache.scala:236:41]
wire _s0_read_T_1 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_2 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_3 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_7 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_8 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_9 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_10 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_11 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_12 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_13 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_14 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_15 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_16 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_17 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_18 = 1'h0; // @[package.scala:16:47]
wire _s0_read_T_19 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_20 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_21 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_22 = 1'h0; // @[package.scala:81:59]
wire _s0_read_T_23 = 1'h0; // @[Consts.scala:87:44]
wire _dataArb_io_in_3_valid_res_T = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_res_T_1 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_res_T_2 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_res_T_4 = 1'h0; // @[DCache.scala:1185:58]
wire _dataArb_io_in_3_valid_T_1 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_2 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_3 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_7 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_8 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_9 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_10 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_11 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_12 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_13 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_14 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_15 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_16 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_17 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_18 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_19 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_20 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_21 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_22 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_23 = 1'h0; // @[Consts.scala:87:44]
wire _dataArb_io_in_3_valid_T_25 = 1'h0; // @[Consts.scala:90:32]
wire _dataArb_io_in_3_valid_T_26 = 1'h0; // @[Consts.scala:90:49]
wire _dataArb_io_in_3_valid_T_27 = 1'h0; // @[Consts.scala:90:42]
wire _dataArb_io_in_3_valid_T_28 = 1'h0; // @[Consts.scala:90:66]
wire _dataArb_io_in_3_valid_T_29 = 1'h0; // @[Consts.scala:90:59]
wire _dataArb_io_in_3_valid_T_30 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_31 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_32 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_33 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_34 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_35 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_36 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_37 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_38 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_39 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_40 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_41 = 1'h0; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_42 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_43 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_44 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_45 = 1'h0; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_46 = 1'h0; // @[Consts.scala:87:44]
wire _dataArb_io_in_3_valid_T_47 = 1'h0; // @[Consts.scala:90:76]
wire _dataArb_io_in_3_valid_T_48 = 1'h0; // @[DCache.scala:1191:35]
wire _dataArb_io_in_3_valid_T_49 = 1'h0; // @[DCache.scala:1191:57]
wire _dataArb_io_in_3_valid_T_50 = 1'h0; // @[DCache.scala:1191:45]
wire _dataArb_io_in_3_valid_T_51 = 1'h0; // @[DCache.scala:1191:23]
wire _dataArb_io_in_3_valid_T_53 = 1'h0; // @[DCache.scala:1186:12]
wire _dataArb_io_in_3_valid_T_57 = 1'h0; // @[DCache.scala:1186:11]
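// s1_did_read: subterms constant-folded to 1'b0.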
wire _s1_did_read_T_1 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_2 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_3 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_7 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_8 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_9 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_10 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_11 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_12 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_13 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_14 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_15 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_16 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_17 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_18 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_19 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_20 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_21 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_22 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_23 = 1'h0; // @[Consts.scala:87:44]
wire _s1_did_read_T_25 = 1'h0; // @[Consts.scala:90:32]
wire _s1_did_read_T_26 = 1'h0; // @[Consts.scala:90:49]
wire _s1_did_read_T_27 = 1'h0; // @[Consts.scala:90:42]
wire _s1_did_read_T_28 = 1'h0; // @[Consts.scala:90:66]
wire _s1_did_read_T_29 = 1'h0; // @[Consts.scala:90:59]
wire _s1_did_read_T_30 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_31 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_32 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_33 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_34 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_35 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_36 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_37 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_38 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_39 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_40 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_41 = 1'h0; // @[package.scala:16:47]
wire _s1_did_read_T_42 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_43 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_44 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_45 = 1'h0; // @[package.scala:81:59]
wire _s1_did_read_T_46 = 1'h0; // @[Consts.scala:87:44]
wire _s1_did_read_T_47 = 1'h0; // @[Consts.scala:90:76]
wire _s1_did_read_T_48 = 1'h0; // @[DCache.scala:1191:35]
wire _s1_did_read_T_49 = 1'h0; // @[DCache.scala:1191:57]
wire _s1_did_read_T_50 = 1'h0; // @[DCache.scala:1191:45]
wire _s1_did_read_T_51 = 1'h0; // @[DCache.scala:1191:23]
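// Stage-2 terms (TLB kill, PMA exceptions, metadata/data ECC errors, LR/SC, correction) constant-folded to 1'b0.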
wire _tlb_io_kill_T = 1'h0; // @[DCache.scala:272:53]
wire _tlb_io_kill_T_1 = 1'h0; // @[DCache.scala:272:33]
wire _s2_pma_T_gpa_is_pte = 1'h0; // @[DCache.scala:349:18]
wire _s2_pma_T_gf_ld = 1'h0; // @[DCache.scala:349:18]
wire _s2_pma_T_gf_st = 1'h0; // @[DCache.scala:349:18]
wire _s2_pma_T_gf_inst = 1'h0; // @[DCache.scala:349:18]
wire _s2_pma_T_ma_inst = 1'h0; // @[DCache.scala:349:18]
wire s2_meta_error_uncorrectable = 1'h0; // @[DCache.scala:360:66]
wire s2_meta_error = 1'h0; // @[DCache.scala:362:83]
wire s2_store_merge = 1'h0; // @[DCache.scala:388:28]
wire _r_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_38 = 1'h0; // @[Misc.scala:35:9]
wire _s2_data_error_T = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_1 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_2 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_3 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_4 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_5 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_6 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_7 = 1'h0; // @[ECC.scala:15:27]
wire _s2_data_error_T_8 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_T_9 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_T_10 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_T_11 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_T_12 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_T_13 = 1'h0; // @[package.scala:81:59]
wire s2_data_error = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_uncorrectable_T = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_uncorrectable_T_1 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_uncorrectable_T_2 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_uncorrectable_T_3 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_uncorrectable_T_4 = 1'h0; // @[package.scala:81:59]
wire _s2_data_error_uncorrectable_T_5 = 1'h0; // @[package.scala:81:59]
wire s2_data_error_uncorrectable = 1'h0; // @[package.scala:81:59]
wire s2_valid_data_error = 1'h0; // @[DCache.scala:421:63]
wire s2_cannot_victimize = 1'h0; // @[DCache.scala:428:45]
wire _r_T_73 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_77 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_81 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_119 = 1'h0; // @[Metadata.scala:140:24]
wire _r_T_121 = 1'h0; // @[Metadata.scala:140:24]
wire _r_T_137 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_141 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_145 = 1'h0; // @[Misc.scala:38:9]
wire _s2_dont_nack_misc_T_2 = 1'h0; // @[DCache.scala:442:23]
wire _s2_dont_nack_misc_T_3 = 1'h0; // @[DCache.scala:442:43]
wire _s2_dont_nack_misc_T_5 = 1'h0; // @[DCache.scala:442:54]
wire _s2_dont_nack_misc_T_6 = 1'h0; // @[DCache.scala:443:23]
wire _s2_dont_nack_misc_T_8 = 1'h0; // @[DCache.scala:443:44]
wire _s2_dont_nack_misc_T_9 = 1'h0; // @[DCache.scala:442:67]
wire _s2_first_meta_corrected_T = 1'h0; // @[Mux.scala:52:83]
wire _s2_first_meta_corrected_T_1 = 1'h0; // @[Mux.scala:52:83]
wire _s2_first_meta_corrected_T_2 = 1'h0; // @[Mux.scala:52:83]
wire _s2_first_meta_corrected_T_3 = 1'h0; // @[Mux.scala:52:83]
wire _metaArb_io_in_1_valid_T_2 = 1'h0; // @[DCache.scala:450:43]
wire _metaArb_io_in_1_bits_way_en_T = 1'h0; // @[OneHot.scala:85:71]
wire _metaArb_io_in_1_bits_way_en_T_1 = 1'h0; // @[OneHot.scala:85:71]
wire _metaArb_io_in_1_bits_way_en_T_2 = 1'h0; // @[OneHot.scala:85:71]
wire _metaArb_io_in_1_bits_way_en_T_3 = 1'h0; // @[OneHot.scala:85:71]
wire s2_lr = 1'h0; // @[DCache.scala:470:56]
wire s2_sc = 1'h0; // @[DCache.scala:471:56]
wire s2_sc_fail = 1'h0; // @[DCache.scala:477:26]
wire _s2_correct_T_1 = 1'h0; // @[DCache.scala:487:34]
wire _s2_correct_T_4 = 1'h0; // @[DCache.scala:487:55]
wire s2_correct = 1'h0; // @[DCache.scala:487:97]
wire _s2_valid_correct_T = 1'h0; // @[DCache.scala:489:60]
wire s2_valid_correct = 1'h0; // @[DCache.scala:489:74]
wire _pstore1_rmw_T_49 = 1'h0; // @[DCache.scala:1191:57]
wire pstore1_rmw = 1'h0; // @[DCache.scala:498:32]
wire pstore1_merge_likely = 1'h0; // @[DCache.scala:499:68]
wire pstore1_merge = 1'h0; // @[DCache.scala:500:38]
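// Store-buffer drain terms (pstore_drain_opportunistic, pstore2 store-gen): constant-folded to 1'b0.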
wire _pstore_drain_opportunistic_res_T = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_res_T_1 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_res_T_2 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_res_T_4 = 1'h0; // @[DCache.scala:1185:58]
wire _pstore_drain_opportunistic_T_1 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_2 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_3 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_7 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_8 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_9 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_10 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_11 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_12 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_13 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_14 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_15 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_16 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_17 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_18 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_19 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_20 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_21 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_22 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_23 = 1'h0; // @[Consts.scala:87:44]
wire _pstore_drain_opportunistic_T_25 = 1'h0; // @[Consts.scala:90:32]
wire _pstore_drain_opportunistic_T_26 = 1'h0; // @[Consts.scala:90:49]
wire _pstore_drain_opportunistic_T_27 = 1'h0; // @[Consts.scala:90:42]
wire _pstore_drain_opportunistic_T_28 = 1'h0; // @[Consts.scala:90:66]
wire _pstore_drain_opportunistic_T_29 = 1'h0; // @[Consts.scala:90:59]
wire _pstore_drain_opportunistic_T_30 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_31 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_32 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_33 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_34 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_35 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_36 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_37 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_38 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_39 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_40 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_41 = 1'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_42 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_43 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_44 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_45 = 1'h0; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_46 = 1'h0; // @[Consts.scala:87:44]
wire _pstore_drain_opportunistic_T_47 = 1'h0; // @[Consts.scala:90:76]
wire _pstore_drain_opportunistic_T_48 = 1'h0; // @[DCache.scala:1191:35]
wire _pstore_drain_opportunistic_T_49 = 1'h0; // @[DCache.scala:1191:57]
wire _pstore_drain_opportunistic_T_50 = 1'h0; // @[DCache.scala:1191:45]
wire _pstore_drain_opportunistic_T_51 = 1'h0; // @[DCache.scala:1191:23]
wire _pstore_drain_opportunistic_T_53 = 1'h0; // @[DCache.scala:1186:12]
wire _pstore_drain_opportunistic_T_57 = 1'h0; // @[DCache.scala:1186:11]
wire _pstore_drain_opportunistic_T_60 = 1'h0; // @[DCache.scala:502:106]
wire pstore_drain_s2_kill = 1'h0; // @[DCache.scala:515:25]
wire _pstore_drain_T_1 = 1'h0; // @[DCache.scala:517:17]
wire _pstore2_storegen_data_T_2 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_6 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_10 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_14 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_18 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_22 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_26 = 1'h0; // @[DCache.scala:528:95]
wire _pstore2_storegen_data_T_30 = 1'h0; // @[DCache.scala:528:95]
wire dataArb_io_in_0_valid_s2_kill = 1'h0; // @[DCache.scala:515:25]
wire _dataArb_io_in_0_valid_T_1 = 1'h0; // @[DCache.scala:517:17]
wire _dataArb_io_in_0_bits_wordMask_T_1 = 1'h0; // @[DCache.scala:555:20]
wire _io_cpu_s2_nack_cause_raw_T_2 = 1'h0; // @[DCache.scala:574:57]
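// TileLink A-channel (Get/Put/Atomics) and C-channel (Release) constants: corrupt bits and unused legality subterms tied to 1'b0.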
wire get_corrupt = 1'h0; // @[Edges.scala:460:17]
wire _put_legal_T_62 = 1'h0; // @[Parameters.scala:684:29]
wire _put_legal_T_68 = 1'h0; // @[Parameters.scala:684:54]
wire put_corrupt = 1'h0; // @[Edges.scala:480:17]
wire _putpartial_legal_T_62 = 1'h0; // @[Parameters.scala:684:29]
wire _putpartial_legal_T_68 = 1'h0; // @[Parameters.scala:684:54]
wire putpartial_corrupt = 1'h0; // @[Edges.scala:500:17]
wire _atomics_WIRE_source = 1'h0; // @[DCache.scala:587:51]
wire _atomics_WIRE_corrupt = 1'h0; // @[DCache.scala:587:51]
wire _atomics_WIRE_1_source = 1'h0; // @[DCache.scala:587:38]
wire _atomics_WIRE_1_corrupt = 1'h0; // @[DCache.scala:587:38]
wire _atomics_legal_T_52 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_58 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_corrupt = 1'h0; // @[Edges.scala:534:17]
wire _atomics_legal_T_112 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_118 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_1_corrupt = 1'h0; // @[Edges.scala:534:17]
wire _atomics_legal_T_172 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_178 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_2_corrupt = 1'h0; // @[Edges.scala:534:17]
wire _atomics_legal_T_232 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_238 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_3_corrupt = 1'h0; // @[Edges.scala:534:17]
wire _atomics_legal_T_292 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_298 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_4_corrupt = 1'h0; // @[Edges.scala:517:17]
wire _atomics_legal_T_352 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_358 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_5_corrupt = 1'h0; // @[Edges.scala:517:17]
wire _atomics_legal_T_412 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_418 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_6_corrupt = 1'h0; // @[Edges.scala:517:17]
wire _atomics_legal_T_472 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_478 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_7_corrupt = 1'h0; // @[Edges.scala:517:17]
wire _atomics_legal_T_532 = 1'h0; // @[Parameters.scala:684:29]
wire _atomics_legal_T_538 = 1'h0; // @[Parameters.scala:684:54]
wire atomics_a_8_corrupt = 1'h0; // @[Edges.scala:517:17]
wire _atomics_T_1_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_3_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_5_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_7_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_9_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_11_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_13_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _atomics_T_15_corrupt = 1'h0; // @[DCache.scala:587:81]
wire atomics_corrupt = 1'h0; // @[DCache.scala:587:81]
wire _tl_out_a_valid_T_8 = 1'h0; // @[DCache.scala:607:44]
wire _tl_out_a_valid_T_9 = 1'h0; // @[DCache.scala:607:65]
wire _tl_out_a_bits_legal_T = 1'h0; // @[Parameters.scala:684:29]
wire _tl_out_a_bits_legal_T_24 = 1'h0; // @[Parameters.scala:684:54]
wire _tl_out_a_bits_legal_T_39 = 1'h0; // @[Parameters.scala:686:26]
wire tl_out_a_bits_a_source = 1'h0; // @[Edges.scala:346:17]
wire tl_out_a_bits_a_corrupt = 1'h0; // @[Edges.scala:346:17]
wire tl_out_a_bits_a_mask_sub_size = 1'h0; // @[Misc.scala:209:26]
wire _tl_out_a_bits_a_mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38]
wire _tl_out_a_bits_a_mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38]
wire _tl_out_a_bits_a_mask_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38]
wire _tl_out_a_bits_a_mask_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38]
wire _tl_out_a_bits_T_6_corrupt = 1'h0; // @[DCache.scala:611:8]
wire _tl_out_a_bits_T_7_corrupt = 1'h0; // @[DCache.scala:610:8]
wire _tl_out_a_bits_T_8_corrupt = 1'h0; // @[DCache.scala:609:8]
wire _tl_out_a_bits_T_9_corrupt = 1'h0; // @[DCache.scala:608:23]
wire nackResponseMessage_corrupt = 1'h0; // @[Edges.scala:416:17]
wire cleanReleaseMessage_corrupt = 1'h0; // @[Edges.scala:416:17]
wire dirtyReleaseMessage_corrupt = 1'h0; // @[Edges.scala:433:17]
wire _nodeOut_c_valid_T = 1'h0; // @[DCache.scala:810:48]
wire _nodeOut_c_valid_T_2 = 1'h0; // @[DCache.scala:810:74]
wire _discard_line_T_2 = 1'h0; // @[DCache.scala:818:102]
wire _release_state_T_2 = 1'h0; // @[DCache.scala:820:28]
wire _release_state_T_4 = 1'h0; // @[DCache.scala:820:54]
wire _release_state_T_5 = 1'h0; // @[DCache.scala:820:75]
wire _release_state_T_7 = 1'h0; // @[DCache.scala:820:98]
wire _release_state_T_12 = 1'h0; // @[DCache.scala:820:127]
wire probe_bits_res_source = 1'h0; // @[DCache.scala:1202:19]
wire probe_bits_res_corrupt = 1'h0; // @[DCache.scala:1202:19]
wire _nodeOut_c_bits_legal_T = 1'h0; // @[Parameters.scala:684:29]
wire _nodeOut_c_bits_legal_T_1 = 1'h0; // @[Parameters.scala:137:31]
wire _nodeOut_c_bits_legal_T_10 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_15 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_20 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_24 = 1'h0; // @[Parameters.scala:684:54]
wire _nodeOut_c_bits_legal_T_31 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_36 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_37 = 1'h0; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_38 = 1'h0; // @[Parameters.scala:684:54]
wire _nodeOut_c_bits_legal_T_39 = 1'h0; // @[Parameters.scala:686:26]
wire nodeOut_c_bits_legal = 1'h0; // @[Parameters.scala:686:26]
wire nodeOut_c_bits_c_source = 1'h0; // @[Edges.scala:380:17]
wire nodeOut_c_bits_c_corrupt = 1'h0; // @[Edges.scala:380:17]
wire _nodeOut_c_bits_legal_T_40 = 1'h0; // @[Parameters.scala:684:29]
wire _nodeOut_c_bits_legal_T_41 = 1'h0; // @[Parameters.scala:137:31]
wire _nodeOut_c_bits_legal_T_50 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_55 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_60 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_64 = 1'h0; // @[Parameters.scala:684:54]
wire _nodeOut_c_bits_legal_T_71 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_76 = 1'h0; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_77 = 1'h0; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_78 = 1'h0; // @[Parameters.scala:684:54]
wire _nodeOut_c_bits_legal_T_79 = 1'h0; // @[Parameters.scala:686:26]
wire nodeOut_c_bits_legal_1 = 1'h0; // @[Parameters.scala:686:26]
wire nodeOut_c_bits_c_1_source = 1'h0; // @[Edges.scala:396:17]
wire nodeOut_c_bits_c_1_corrupt = 1'h0; // @[Edges.scala:396:17]
wire _nodeOut_c_bits_corrupt_T = 1'h0; // @[DCache.scala:887:42]
wire _io_cpu_s2_xcpt_WIRE_miss = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_gpa_is_pte = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_pf_ld = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_pf_st = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_pf_inst = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_gf_ld = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_gf_st = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_gf_inst = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_ae_ld = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_ae_st = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_ae_inst = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_ma_ld = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_ma_st = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_ma_inst = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_cacheable = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_must_alloc = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_WIRE_prefetchable = 1'h0; // @[DCache.scala:933:74]
wire _io_cpu_s2_xcpt_T_gpa_is_pte = 1'h0; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_gf_ld = 1'h0; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_gf_st = 1'h0; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_gf_inst = 1'h0; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_ma_inst = 1'h0; // @[DCache.scala:933:24]
wire _s2_data_word_possibly_uncached_T = 1'h0; // @[DCache.scala:972:73]
wire io_cpu_resp_bits_data_doZero = 1'h0; // @[AMOALU.scala:43:31]
wire io_cpu_resp_bits_data_doZero_1 = 1'h0; // @[AMOALU.scala:43:31]
wire io_cpu_resp_bits_data_doZero_2 = 1'h0; // @[AMOALU.scala:43:31]
wire io_cpu_resp_bits_data_word_bypass_doZero = 1'h0; // @[AMOALU.scala:43:31]
wire _s1_flush_valid_T = 1'h0; // @[Decoupled.scala:51:35]
wire _s1_flush_valid_T_2 = 1'h0; // @[DCache.scala:1014:43]
wire _s1_flush_valid_T_4 = 1'h0; // @[DCache.scala:1014:62]
wire _s1_flush_valid_T_6 = 1'h0; // @[DCache.scala:1014:93]
wire _s1_flush_valid_T_8 = 1'h0; // @[DCache.scala:1014:122]
wire _metaArb_io_in_5_valid_T = 1'h0; // @[DCache.scala:1015:41]
wire _metaArb_io_in_5_valid_T_1 = 1'h0; // @[DCache.scala:1015:38]
wire _clock_en_reg_T_16 = 1'h0; // @[DCache.scala:1070:25]
wire _io_cpu_perf_storeBufferEmptyAfterStore_T_2 = 1'h0; // @[DCache.scala:1086:27]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_1 = 1'h0; // @[DCache.scala:1089:28]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_5 = 1'h0; // @[DCache.scala:1089:44]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_50 = 1'h0; // @[DCache.scala:1191:57]
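// Multi-bit fields constant-folded to zero: 64-bit data values and 8-bit byte masks.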
wire [63:0] io_cpu_req_bits_data = 64'h0; // @[DCache.scala:101:7]
wire [63:0] s0_req_data = 64'h0; // @[DCache.scala:192:24]
wire [63:0] get_data = 64'h0; // @[Edges.scala:460:17]
wire [63:0] _atomics_WIRE_data = 64'h0; // @[DCache.scala:587:51]
wire [63:0] _atomics_WIRE_1_data = 64'h0; // @[DCache.scala:587:38]
wire [63:0] tl_out_a_bits_a_data = 64'h0; // @[Edges.scala:346:17]
wire [63:0] nackResponseMessage_data = 64'h0; // @[Edges.scala:416:17]
wire [63:0] cleanReleaseMessage_data = 64'h0; // @[Edges.scala:416:17]
wire [63:0] dirtyReleaseMessage_data = 64'h0; // @[Edges.scala:433:17]
wire [63:0] probe_bits_res_data = 64'h0; // @[DCache.scala:1202:19]
wire [63:0] nodeOut_c_bits_c_data = 64'h0; // @[Edges.scala:380:17]
wire [63:0] nodeOut_c_bits_c_1_data = 64'h0; // @[Edges.scala:396:17]
wire [63:0] _s2_data_word_possibly_uncached_T_1 = 64'h0; // @[DCache.scala:972:43]
wire [7:0] io_cpu_req_bits_mask = 8'h0; // @[DCache.scala:101:7]
wire [7:0] io_ptw_gstatus_zero1 = 8'h0; // @[DCache.scala:101:7]
wire [7:0] pma_checker_io_ptw_status_zero1 = 8'h0; // @[DCache.scala:120:32]
wire [7:0] pma_checker_io_ptw_gstatus_zero1 = 8'h0; // @[DCache.scala:120:32]
wire [7:0] pma_checker_r_sectored_repl_addr_valids = 8'h0; // @[package.scala:45:27]
wire [7:0] pma_checker__r_sectored_hit_bits_T = 8'h0; // @[OneHot.scala:21:45]
wire [7:0] s0_req_mask = 8'h0; // @[DCache.scala:192:24]
wire [7:0] _pstore2_storegen_mask_mergedMask_T = 8'h0; // @[DCache.scala:533:42]
wire [7:0] _atomics_WIRE_mask = 8'h0; // @[DCache.scala:587:51]
wire [7:0] _atomics_WIRE_1_mask = 8'h0; // @[DCache.scala:587:38]
wire [7:0] probe_bits_res_mask = 8'h0; // @[DCache.scala:1202:19]
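// Single-bit signals constant-folded to 1'b1.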
wire io_cpu_clock_enabled = 1'h1; // @[DCache.scala:101:7]
wire io_ptw_req_bits_valid = 1'h1; // @[DCache.scala:101:7]
wire io_tlb_port_req_ready = 1'h1; // @[DCache.scala:101:7]
wire pma_checker_io_req_ready = 1'h1; // @[DCache.scala:120:32]
wire pma_checker_io_req_bits_passthrough = 1'h1; // @[DCache.scala:120:32]
wire pma_checker_io_ptw_req_bits_valid = 1'h1; // @[DCache.scala:120:32]
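// pma_checker TLB instance: tag-match / ignore / hit helper terms constant-folded to 1'b1.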
wire pma_checker__mpu_ppn_ignore_T = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_mpu_ppn_ignore = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__mpu_ppn_ignore_T_1 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_mpu_ppn_ignore_1 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__mpu_priv_T = 1'h1; // @[TLB.scala:415:52]
wire pma_checker__mpu_priv_T_1 = 1'h1; // @[TLB.scala:415:38]
wire pma_checker__homogeneous_T_71 = 1'h1; // @[TLBPermissions.scala:87:22]
wire pma_checker__deny_access_to_debug_T = 1'h1; // @[TLB.scala:428:39]
wire pma_checker__sector_hits_T_6 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_14 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_22 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_30 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_38 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_46 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_54 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__sector_hits_T_62 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__superpage_hits_tagMatch_T = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__superpage_hits_ignore_T_1 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__superpage_hits_ignore_T_2 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_2 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_13 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__superpage_hits_tagMatch_T_1 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__superpage_hits_ignore_T_4 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__superpage_hits_ignore_T_5 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_5 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_27 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__superpage_hits_tagMatch_T_2 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__superpage_hits_ignore_T_7 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__superpage_hits_ignore_T_8 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_8 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_41 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__superpage_hits_tagMatch_T_3 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__superpage_hits_ignore_T_10 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__superpage_hits_ignore_T_11 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_superpage_hits_ignore_11 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__superpage_hits_T_55 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hitsVec_T_3 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_9 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_15 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_21 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_27 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_33 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_39 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_T_45 = 1'h1; // @[TLB.scala:174:105]
wire pma_checker__hitsVec_tagMatch_T = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__hitsVec_ignore_T_1 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__hitsVec_ignore_T_2 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_2 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_61 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hitsVec_tagMatch_T_1 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__hitsVec_ignore_T_4 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__hitsVec_ignore_T_5 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_5 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_76 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hitsVec_tagMatch_T_2 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__hitsVec_ignore_T_7 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__hitsVec_ignore_T_8 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_8 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_91 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hitsVec_tagMatch_T_3 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__hitsVec_ignore_T_10 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__hitsVec_ignore_T_11 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_11 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_106 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hitsVec_tagMatch_T_4 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__hitsVec_ignore_T_13 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_13 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_116 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hitsVec_ignore_T_14 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_hitsVec_ignore_14 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__hitsVec_T_121 = 1'h1; // @[TLB.scala:183:40]
wire pma_checker__hits_T = 1'h1; // @[TLB.scala:442:18]
wire pma_checker__newEntry_sr_T = 1'h1; // @[PTW.scala:141:47]
wire pma_checker__newEntry_sw_T = 1'h1; // @[PTW.scala:141:47]
wire pma_checker__newEntry_sx_T = 1'h1; // @[PTW.scala:141:47]
wire pma_checker__ppn_T = 1'h1; // @[TLB.scala:502:30]
wire pma_checker__ppn_ignore_T = 1'h1; // @[TLB.scala:197:28]
wire pma_checker__ppn_ignore_T_1 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_ppn_ignore_1 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__ppn_ignore_T_2 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker__ppn_ignore_T_3 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_ppn_ignore_3 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__ppn_ignore_T_4 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker__ppn_ignore_T_5 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_ppn_ignore_5 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__ppn_ignore_T_6 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker__ppn_ignore_T_7 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_ppn_ignore_7 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__ppn_ignore_T_8 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_ppn_ignore_8 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__ppn_ignore_T_9 = 1'h1; // @[TLB.scala:197:28]
wire pma_checker_ppn_ignore_9 = 1'h1; // @[TLB.scala:197:34]
wire pma_checker__stage1_bypass_T_1 = 1'h1; // @[TLB.scala:517:83]
wire pma_checker__stage2_bypass_T = 1'h1; // @[TLB.scala:523:42]
wire pma_checker__bad_va_T_1 = 1'h1; // @[TLB.scala:560:26]
wire pma_checker__gpa_hits_hit_mask_T_3 = 1'h1; // @[TLB.scala:606:107]
wire pma_checker__tlb_miss_T = 1'h1; // @[TLB.scala:613:32]
wire pma_checker__tlb_miss_T_2 = 1'h1; // @[TLB.scala:613:56]
wire pma_checker__tlb_miss_T_4 = 1'h1; // @[TLB.scala:613:67]
wire pma_checker_state_vec_0_set_left_older = 1'h1; // @[Replacement.scala:196:33]
wire pma_checker_state_vec_0_set_left_older_1 = 1'h1; // @[Replacement.scala:196:33]
wire pma_checker__state_vec_0_T_3 = 1'h1; // @[Replacement.scala:218:7]
wire pma_checker__state_vec_0_T_7 = 1'h1; // @[Replacement.scala:218:7]
wire pma_checker__state_vec_0_T_8 = 1'h1; // @[Replacement.scala:206:16]
wire pma_checker_state_vec_0_set_left_older_2 = 1'h1; // @[Replacement.scala:196:33]
wire pma_checker__state_vec_0_T_14 = 1'h1; // @[Replacement.scala:218:7]
wire pma_checker__state_vec_0_T_18 = 1'h1; // @[Replacement.scala:218:7]
wire pma_checker__state_vec_0_T_19 = 1'h1; // @[Replacement.scala:206:16]
wire pma_checker_state_reg_set_left_older = 1'h1; // @[Replacement.scala:196:33]
wire pma_checker__state_reg_T_2 = 1'h1; // @[Replacement.scala:218:7]
wire pma_checker__state_reg_T_6 = 1'h1; // @[Replacement.scala:218:7]
wire pma_checker__state_reg_T_7 = 1'h1; // @[Replacement.scala:206:16]
wire pma_checker__io_req_ready_T = 1'h1; // @[TLB.scala:631:25]
wire pma_checker__io_resp_gpa_page_T = 1'h1; // @[TLB.scala:657:20]
wire pma_checker__io_ptw_req_bits_valid_T = 1'h1; // @[TLB.scala:663:28]
wire pma_checker__r_superpage_repl_addr_T_6 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_superpage_repl_addr_T_7 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_superpage_repl_addr_T_8 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_superpage_repl_addr_T_9 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_12 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_13 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_14 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_15 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_16 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_17 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_18 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__r_sectored_repl_addr_T_19 = 1'h1; // @[OneHot.scala:48:45]
wire pma_checker__tagMatch_T = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__ignore_T_1 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__ignore_T_2 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_ignore_2 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__tagMatch_T_1 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__ignore_T_4 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__ignore_T_5 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_ignore_5 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__tagMatch_T_2 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__ignore_T_7 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__ignore_T_8 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_ignore_8 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__tagMatch_T_3 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__ignore_T_10 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker__ignore_T_11 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_ignore_11 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__tagMatch_T_4 = 1'h1; // @[TLB.scala:178:43]
wire pma_checker__ignore_T_13 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_ignore_13 = 1'h1; // @[TLB.scala:182:34]
wire pma_checker__ignore_T_14 = 1'h1; // @[TLB.scala:182:28]
wire pma_checker_ignore_14 = 1'h1; // @[TLB.scala:182:34]
wire metaArb_io_in_0_ready = 1'h1; // @[DCache.scala:135:28]
wire metaArb_io_in_0_bits_write = 1'h1; // @[DCache.scala:135:28]
wire metaArb_io_in_1_bits_write = 1'h1; // @[DCache.scala:135:28]
wire metaArb_io_in_2_bits_write = 1'h1; // @[DCache.scala:135:28]
wire metaArb_io_in_3_bits_write = 1'h1; // @[DCache.scala:135:28]
wire metaArb_io_in_4_bits_write = 1'h1; // @[DCache.scala:135:28]
wire metaArb_io_out_ready = 1'h1; // @[DCache.scala:135:28]
wire metaArb__io_in_0_ready_T = 1'h1; // @[Arbiter.scala:153:19]
wire dataArb_io_in_0_ready = 1'h1; // @[DCache.scala:152:28]
wire dataArb_io_in_1_bits_wordMask = 1'h1; // @[DCache.scala:152:28]
wire dataArb_io_in_2_bits_wordMask = 1'h1; // @[DCache.scala:152:28]
wire dataArb_io_in_3_bits_wordMask = 1'h1; // @[DCache.scala:152:28]
wire dataArb_io_out_ready = 1'h1; // @[DCache.scala:152:28]
wire dataArb__io_in_0_ready_T = 1'h1; // @[Arbiter.scala:153:19]
wire _s0_read_T = 1'h1; // @[package.scala:16:47]
wire _s0_read_T_4 = 1'h1; // @[package.scala:81:59]
wire _s0_read_T_5 = 1'h1; // @[package.scala:81:59]
wire _s0_read_T_6 = 1'h1; // @[package.scala:81:59]
wire s0_read = 1'h1; // @[Consts.scala:89:68]
wire _dataArb_io_in_3_valid_res_T_3 = 1'h1; // @[DCache.scala:1185:15]
wire dataArb_io_in_3_valid_res = 1'h1; // @[DCache.scala:1185:46]
wire _dataArb_io_in_3_valid_T = 1'h1; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_T_4 = 1'h1; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_5 = 1'h1; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_6 = 1'h1; // @[package.scala:81:59]
wire _dataArb_io_in_3_valid_T_24 = 1'h1; // @[Consts.scala:89:68]
wire _dataArb_io_in_3_valid_T_52 = 1'h1; // @[DCache.scala:1190:21]
wire _dataArb_io_in_3_valid_T_54 = 1'h1; // @[DCache.scala:1186:28]
wire _s1_did_read_T = 1'h1; // @[package.scala:16:47]
wire _s1_did_read_T_4 = 1'h1; // @[package.scala:81:59]
wire _s1_did_read_T_5 = 1'h1; // @[package.scala:81:59]
wire _s1_did_read_T_6 = 1'h1; // @[package.scala:81:59]
wire _s1_did_read_T_24 = 1'h1; // @[Consts.scala:89:68]
wire _s1_did_read_T_52 = 1'h1; // @[DCache.scala:1190:21]
wire _s2_valid_not_killed_T = 1'h1; // @[DCache.scala:338:48]
wire _s2_flush_valid_T = 1'h1; // @[DCache.scala:363:54]
wire _s2_valid_hit_maybe_flush_pre_data_ecc_and_waw_T = 1'h1; // @[DCache.scala:397:74]
wire _s2_valid_hit_pre_data_ecc_and_waw_T_1 = 1'h1; // @[DCache.scala:418:108]
wire _s2_valid_hit_pre_data_ecc_T = 1'h1; // @[DCache.scala:420:73]
wire _s2_valid_hit_pre_data_ecc_T_1 = 1'h1; // @[DCache.scala:420:88]
wire _s2_valid_hit_T = 1'h1; // @[DCache.scala:422:51]
wire _s2_valid_miss_T_1 = 1'h1; // @[DCache.scala:423:58]
wire _s2_victimize_T = 1'h1; // @[DCache.scala:429:43]
wire _r_T_117 = 1'h1; // @[Metadata.scala:140:24]
wire _s2_dont_nack_misc_T = 1'h1; // @[DCache.scala:441:46]
wire _s2_dont_nack_misc_T_4 = 1'h1; // @[DCache.scala:442:57]
wire _metaArb_io_in_2_bits_write_T = 1'h1; // @[DCache.scala:463:34]
wire _s2_valid_correct_T_1 = 1'h1; // @[DCache.scala:489:77]
wire _pstore1_merge_T_1 = 1'h1; // @[DCache.scala:490:61]
wire _pstore1_merge_T_3 = 1'h1; // @[DCache.scala:491:51]
wire _pstore_drain_opportunistic_res_T_3 = 1'h1; // @[DCache.scala:1185:15]
wire pstore_drain_opportunistic_res = 1'h1; // @[DCache.scala:1185:46]
wire _pstore_drain_opportunistic_T = 1'h1; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_4 = 1'h1; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_5 = 1'h1; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_6 = 1'h1; // @[package.scala:81:59]
wire _pstore_drain_opportunistic_T_24 = 1'h1; // @[Consts.scala:89:68]
wire _pstore_drain_opportunistic_T_52 = 1'h1; // @[DCache.scala:1190:21]
wire _pstore_drain_opportunistic_T_54 = 1'h1; // @[DCache.scala:1186:28]
wire _pstore_drain_opportunistic_T_61 = 1'h1; // @[DCache.scala:502:95]
wire _pstore1_valid_T_1 = 1'h1; // @[DCache.scala:490:61]
wire _pstore1_valid_T_3 = 1'h1; // @[DCache.scala:491:51]
wire _pstore_drain_T = 1'h1; // @[DCache.scala:516:5]
wire _pstore_drain_T_3 = 1'h1; // @[DCache.scala:506:87]
wire _pstore_drain_T_6 = 1'h1; // @[DCache.scala:518:44]
wire _pstore1_held_T_1 = 1'h1; // @[DCache.scala:490:61]
wire _pstore1_held_T_3 = 1'h1; // @[DCache.scala:491:51]
wire _pstore1_held_T_5 = 1'h1; // @[DCache.scala:521:38]
wire _dataArb_io_in_0_valid_T = 1'h1; // @[DCache.scala:516:5]
wire _dataArb_io_in_0_valid_T_3 = 1'h1; // @[DCache.scala:506:87]
wire _dataArb_io_in_0_valid_T_6 = 1'h1; // @[DCache.scala:518:44]
wire _dataArb_io_in_0_bits_wordMask_T = 1'h1; // @[DCache.scala:555:20]
wire _io_cpu_s2_nack_cause_raw_T = 1'h1; // @[DCache.scala:574:59]
wire _io_cpu_s2_nack_cause_raw_T_1 = 1'h1; // @[DCache.scala:574:74]
wire _get_legal_T = 1'h1; // @[Parameters.scala:92:28]
wire _get_legal_T_1 = 1'h1; // @[Parameters.scala:92:38]
wire _get_legal_T_2 = 1'h1; // @[Parameters.scala:92:33]
wire _get_legal_T_3 = 1'h1; // @[Parameters.scala:684:29]
wire _get_legal_T_10 = 1'h1; // @[Parameters.scala:92:28]
wire _get_legal_T_11 = 1'h1; // @[Parameters.scala:92:38]
wire _get_legal_T_12 = 1'h1; // @[Parameters.scala:92:33]
wire _get_legal_T_13 = 1'h1; // @[Parameters.scala:684:29]
wire _get_legal_T_62 = 1'h1; // @[Parameters.scala:92:28]
wire _get_legal_T_63 = 1'h1; // @[Parameters.scala:92:38]
wire _get_legal_T_64 = 1'h1; // @[Parameters.scala:92:33]
wire _get_legal_T_65 = 1'h1; // @[Parameters.scala:684:29]
wire _put_legal_T = 1'h1; // @[Parameters.scala:92:28]
wire _put_legal_T_1 = 1'h1; // @[Parameters.scala:92:38]
wire _put_legal_T_2 = 1'h1; // @[Parameters.scala:92:33]
wire _put_legal_T_3 = 1'h1; // @[Parameters.scala:684:29]
wire _put_legal_T_10 = 1'h1; // @[Parameters.scala:92:28]
wire _put_legal_T_11 = 1'h1; // @[Parameters.scala:92:38]
wire _put_legal_T_12 = 1'h1; // @[Parameters.scala:92:33]
wire _put_legal_T_13 = 1'h1; // @[Parameters.scala:684:29]
wire _put_legal_T_69 = 1'h1; // @[Parameters.scala:92:28]
wire _put_legal_T_70 = 1'h1; // @[Parameters.scala:92:38]
wire _put_legal_T_71 = 1'h1; // @[Parameters.scala:92:33]
wire _put_legal_T_72 = 1'h1; // @[Parameters.scala:684:29]
wire _putpartial_legal_T = 1'h1; // @[Parameters.scala:92:28]
wire _putpartial_legal_T_1 = 1'h1; // @[Parameters.scala:92:38]
wire _putpartial_legal_T_2 = 1'h1; // @[Parameters.scala:92:33]
wire _putpartial_legal_T_3 = 1'h1; // @[Parameters.scala:684:29]
wire _putpartial_legal_T_10 = 1'h1; // @[Parameters.scala:92:28]
wire _putpartial_legal_T_11 = 1'h1; // @[Parameters.scala:92:38]
wire _putpartial_legal_T_12 = 1'h1; // @[Parameters.scala:92:33]
wire _putpartial_legal_T_13 = 1'h1; // @[Parameters.scala:684:29]
wire _putpartial_legal_T_69 = 1'h1; // @[Parameters.scala:92:28]
wire _putpartial_legal_T_70 = 1'h1; // @[Parameters.scala:92:38]
wire _putpartial_legal_T_71 = 1'h1; // @[Parameters.scala:92:33]
wire _putpartial_legal_T_72 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_1 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_2 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_3 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_60 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_61 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_62 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_63 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_120 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_121 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_122 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_123 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_180 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_181 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_182 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_183 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_240 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_241 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_242 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_243 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_300 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_301 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_302 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_303 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_360 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_361 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_362 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_363 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_420 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_421 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_422 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_423 = 1'h1; // @[Parameters.scala:684:29]
wire _atomics_legal_T_480 = 1'h1; // @[Parameters.scala:92:28]
wire _atomics_legal_T_481 = 1'h1; // @[Parameters.scala:92:38]
wire _atomics_legal_T_482 = 1'h1; // @[Parameters.scala:92:33]
wire _atomics_legal_T_483 = 1'h1; // @[Parameters.scala:684:29]
wire _tl_out_a_valid_T = 1'h1; // @[DCache.scala:603:21]
wire _tl_out_a_bits_legal_T_25 = 1'h1; // @[Parameters.scala:91:44]
wire _tl_out_a_bits_legal_T_26 = 1'h1; // @[Parameters.scala:684:29]
wire tl_out_a_bits_a_mask_sub_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21]
wire tl_out_a_bits_a_mask_sub_sub_size = 1'h1; // @[Misc.scala:209:26]
wire tl_out_a_bits_a_mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_sub_sub_1_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_sub_2_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_sub_3_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_size = 1'h1; // @[Misc.scala:209:26]
wire tl_out_a_bits_a_mask_acc = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_1 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_2 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_3 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_4 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_5 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_6 = 1'h1; // @[Misc.scala:215:29]
wire tl_out_a_bits_a_mask_acc_7 = 1'h1; // @[Misc.scala:215:29]
wire _tl_d_data_encoded_T_9 = 1'h1; // @[DCache.scala:663:80]
wire _dataArb_io_in_1_bits_wordMask_T = 1'h1; // @[DCache.scala:731:39]
wire _nodeOut_c_bits_legal_T_5 = 1'h1; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_21 = 1'h1; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_22 = 1'h1; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_23 = 1'h1; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_25 = 1'h1; // @[Parameters.scala:91:44]
wire _nodeOut_c_bits_legal_T_26 = 1'h1; // @[Parameters.scala:684:29]
wire _nodeOut_c_bits_legal_T_45 = 1'h1; // @[Parameters.scala:137:59]
wire _nodeOut_c_bits_legal_T_61 = 1'h1; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_62 = 1'h1; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_63 = 1'h1; // @[Parameters.scala:685:42]
wire _nodeOut_c_bits_legal_T_65 = 1'h1; // @[Parameters.scala:91:44]
wire _nodeOut_c_bits_legal_T_66 = 1'h1; // @[Parameters.scala:684:29]
wire _dataArb_io_in_2_bits_wordMask_T = 1'h1; // @[DCache.scala:904:37]
wire _io_cpu_resp_valid_T_1 = 1'h1; // @[DCache.scala:949:73]
wire _io_cpu_replay_next_T_2 = 1'h1; // @[DCache.scala:950:65]
wire _clock_en_reg_T = 1'h1; // @[DCache.scala:1060:19]
wire _clock_en_reg_T_1 = 1'h1; // @[DCache.scala:1060:44]
wire _clock_en_reg_T_2 = 1'h1; // @[DCache.scala:1061:46]
wire _clock_en_reg_T_3 = 1'h1; // @[DCache.scala:1062:31]
wire _clock_en_reg_T_4 = 1'h1; // @[DCache.scala:1063:26]
wire _clock_en_reg_T_5 = 1'h1; // @[DCache.scala:1064:14]
wire _clock_en_reg_T_6 = 1'h1; // @[DCache.scala:1064:26]
wire _clock_en_reg_T_7 = 1'h1; // @[DCache.scala:1065:14]
wire _clock_en_reg_T_8 = 1'h1; // @[DCache.scala:1065:26]
wire _clock_en_reg_T_9 = 1'h1; // @[DCache.scala:1066:27]
wire _clock_en_reg_T_10 = 1'h1; // @[DCache.scala:1067:22]
wire _clock_en_reg_T_11 = 1'h1; // @[DCache.scala:1067:42]
wire _clock_en_reg_T_12 = 1'h1; // @[DCache.scala:1068:18]
wire _clock_en_reg_T_14 = 1'h1; // @[DCache.scala:1068:35]
wire _clock_en_reg_T_15 = 1'h1; // @[DCache.scala:1069:31]
wire _clock_en_reg_T_17 = 1'h1; // @[DCache.scala:1070:22]
wire _clock_en_reg_T_19 = 1'h1; // @[DCache.scala:1070:46]
wire _clock_en_reg_T_20 = 1'h1; // @[DCache.scala:1071:23]
wire _clock_en_reg_T_22 = 1'h1; // @[DCache.scala:1072:23]
wire _clock_en_reg_T_24 = 1'h1; // @[DCache.scala:1072:54]
wire _clock_en_reg_T_26 = 1'h1; // @[DCache.scala:1073:21]
wire _io_cpu_perf_storeBufferEmptyAfterLoad_T_2 = 1'h1; // @[DCache.scala:1082:31]
wire _io_cpu_perf_storeBufferEmptyAfterStore_T_5 = 1'h1; // @[DCache.scala:1087:31]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_3 = 1'h1; // @[DCache.scala:1089:72]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_56 = 1'h1; // @[DCache.scala:1092:115]
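// Two-bit fields constant-folded to 2'h0 (status/privilege fields, TLB entry sub-fields, coherence states).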
wire [1:0] io_ptw_hstatus_vsxl = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_hstatus_zero3 = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_hstatus_zero2 = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_dprv = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_prv = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_sxl = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_uxl = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_xs = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_fs = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_mpp = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_ptw_gstatus_vs = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_tlb_port_req_bits_size = 2'h0; // @[DCache.scala:101:7]
wire [1:0] io_tlb_port_req_bits_prv = 2'h0; // @[DCache.scala:101:7]
wire [1:0] pma_checker_io_ptw_resp_bits_pte_reserved_for_software = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_resp_bits_level = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_dprv = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_prv = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_sxl = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_uxl = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_xs = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_fs = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_mpp = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_status_vs = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_hstatus_vsxl = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_hstatus_zero3 = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_hstatus_zero2 = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_dprv = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_prv = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_sxl = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_uxl = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_xs = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_fs = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_mpp = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_ptw_gstatus_vs = 2'h0; // @[DCache.scala:120:32]
wire [1:0] pma_checker_real_hits_lo_lo_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_real_hits_lo_hi_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_real_hits_hi_lo_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_real_hits_hi_hi_lo = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_real_hits_hi_hi_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker__special_entry_level_T = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_special_entry_data_0_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_special_entry_data_0_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_special_entry_data_0_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_special_entry_data_0_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_waddr = 2'h0; // @[TLB.scala:477:22]
wire [1:0] pma_checker_superpage_entries_0_data_0_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_0_data_0_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_0_data_0_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_0_data_0_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_0_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_0_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_0_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_0_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_1 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_1_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_1_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_1_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_1_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_2 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_2_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_2_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_2_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_2_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_3 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_3_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_3_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_3_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_3_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_4 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_4_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_4_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_4_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_4_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_5 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_5_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_5_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_5_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_5_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_6 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_6_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_6_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_6_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_6_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_idx_7 = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker_sectored_entries_0_7_data_lo_hi_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_7_data_hi_lo_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_7_data_hi_lo_hi_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_7_data_hi_hi_lo_hi = 2'h0; // @[TLB.scala:217:24]
wire [1:0] pma_checker_lo_lo = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_lo_hi = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_hi_lo = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_hi_hi = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_hi_2 = 2'h0; // @[OneHot.scala:30:18]
wire [1:0] pma_checker_lo_2 = 2'h0; // @[OneHot.scala:31:18]
wire [1:0] pma_checker__state_vec_0_T = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker__state_vec_0_T_11 = 2'h0; // @[Replacement.scala:207:62]
wire [1:0] pma_checker_lo_3 = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_hi_3 = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_hi_4 = 2'h0; // @[OneHot.scala:30:18]
wire [1:0] pma_checker_lo_4 = 2'h0; // @[OneHot.scala:31:18]
wire [1:0] pma_checker_state_reg_touch_way_sized = 2'h0; // @[package.scala:163:13]
wire [1:0] pma_checker__multipleHits_T_3 = 2'h0; // @[Misc.scala:182:39]
wire [1:0] pma_checker__multipleHits_T_12 = 2'h0; // @[Misc.scala:182:39]
wire [1:0] pma_checker__multipleHits_T_24 = 2'h0; // @[Misc.scala:182:39]
wire [1:0] pma_checker__multipleHits_T_32 = 2'h0; // @[Misc.scala:181:37]
wire [1:0] pma_checker__multipleHits_T_37 = 2'h0; // @[Misc.scala:182:39]
wire [1:0] pma_checker__r_superpage_repl_addr_T_3 = 2'h0; // @[Replacement.scala:249:12]
wire [1:0] pma_checker_r_superpage_repl_addr_valids_lo = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_r_superpage_repl_addr_valids_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker__r_superpage_repl_addr_T_12 = 2'h0; // @[Mux.scala:50:70]
wire [1:0] pma_checker__r_superpage_repl_addr_T_13 = 2'h0; // @[TLB.scala:757:8]
wire [1:0] pma_checker__r_sectored_repl_addr_T_3 = 2'h0; // @[Replacement.scala:249:12]
wire [1:0] pma_checker__r_sectored_repl_addr_T_7 = 2'h0; // @[Replacement.scala:249:12]
wire [1:0] pma_checker__r_sectored_repl_addr_T_8 = 2'h0; // @[Replacement.scala:250:16]
wire [1:0] pma_checker_r_sectored_repl_addr_valids_lo_lo = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_r_sectored_repl_addr_valids_lo_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_r_sectored_repl_addr_valids_hi_lo = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_r_sectored_repl_addr_valids_hi_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] pma_checker_r_sectored_hit_bits_lo_lo = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_r_sectored_hit_bits_lo_hi = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_r_sectored_hit_bits_hi_lo = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_r_sectored_hit_bits_hi_hi = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_r_sectored_hit_bits_hi_2 = 2'h0; // @[OneHot.scala:30:18]
wire [1:0] pma_checker_r_sectored_hit_bits_lo_2 = 2'h0; // @[OneHot.scala:31:18]
wire [1:0] pma_checker__r_sectored_hit_bits_T_4 = 2'h0; // @[OneHot.scala:32:28]
wire [1:0] pma_checker__r_sectored_hit_bits_T_6 = 2'h0; // @[OneHot.scala:32:10]
wire [1:0] pma_checker_r_superpage_hit_bits_lo = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_r_superpage_hit_bits_hi = 2'h0; // @[OneHot.scala:21:45]
wire [1:0] pma_checker_r_superpage_hit_bits_hi_1 = 2'h0; // @[OneHot.scala:30:18]
wire [1:0] pma_checker_r_superpage_hit_bits_lo_1 = 2'h0; // @[OneHot.scala:31:18]
wire [1:0] pma_checker__r_superpage_hit_bits_T_2 = 2'h0; // @[OneHot.scala:32:28]
wire [1:0] pma_checker__r_superpage_hit_bits_T_4 = 2'h0; // @[OneHot.scala:32:10]
wire [1:0] s1_meta_hit_state_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] _s2_valid_no_xcpt_T_1 = 2'h0; // @[DCache.scala:332:54]
wire [1:0] s2_meta_correctable_errors_lo = 2'h0; // @[package.scala:45:27]
wire [1:0] s2_meta_correctable_errors_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] s2_meta_uncorrectable_errors_lo = 2'h0; // @[package.scala:45:27]
wire [1:0] s2_meta_uncorrectable_errors_hi = 2'h0; // @[package.scala:45:27]
wire [1:0] _r_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_75 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_79 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_83 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_87 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_91 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_139 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_143 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_147 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_151 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_155 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] metaArb_io_in_1_bits_data_new_meta_coh_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] _metaArb_io_in_3_bits_data_T_2 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _metaArb_io_in_3_bits_data_T_4 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] probe_bits_res_param = 2'h0; // @[DCache.scala:1202:19]
wire [1:0] _nodeOut_c_bits_legal_T_2 = 2'h0; // @[Parameters.scala:137:41]
wire [1:0] _nodeOut_c_bits_legal_T_42 = 2'h0; // @[Parameters.scala:137:41]
wire [1:0] _io_cpu_s2_xcpt_WIRE_size = 2'h0; // @[DCache.scala:933:74]
wire [1:0] metaArb_io_in_0_bits_data_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] metaArb_io_in_0_bits_data_meta_1_coh_state = 2'h0; // @[HellaCache.scala:305:20]
wire [3:0] pma_checker__r_superpage_repl_addr_T_5 = 4'hF; // @[TLB.scala:757:43]
wire [3:0] metaArb_io_in_0_bits_way_en = 4'hF; // @[DCache.scala:135:28]
wire [3:0] dataArb_io_in_2_bits_way_en = 4'hF; // @[DCache.scala:152:28]
wire [3:0] dataArb_io_in_3_bits_way_en = 4'hF; // @[DCache.scala:152:28]
wire [3:0] _dataArb_io_in_3_bits_way_en_T = 4'hF; // @[DCache.scala:257:35]
wire [3:0] _r_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] tl_out_a_bits_a_mask_lo = 4'hF; // @[Misc.scala:222:10]
wire [3:0] tl_out_a_bits_a_mask_hi = 4'hF; // @[Misc.scala:222:10]
wire [3:0] _dataArb_io_in_2_bits_way_en_T = 4'hF; // @[DCache.scala:906:35]
wire [3:0] _metaArb_io_in_0_bits_way_en_T = 4'hF; // @[DCache.scala:1049:35]
wire [7:0] pma_checker__r_sectored_repl_addr_T_11 = 8'hFF; // @[TLB.scala:757:43]
wire [7:0] dataArb_io_in_1_bits_eccMask = 8'hFF; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_in_2_bits_eccMask = 8'hFF; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_in_3_bits_eccMask = 8'hFF; // @[DCache.scala:152:28]
wire [7:0] _dataArb_io_in_3_bits_wordMask_T = 8'hFF; // @[DCache.scala:254:9]
wire [7:0] _dataArb_io_in_3_bits_eccMask_T = 8'hFF; // @[DCache.scala:256:36]
wire [7:0] tl_out_a_bits_a_mask = 8'hFF; // @[Edges.scala:346:17]
wire [7:0] _tl_out_a_bits_a_mask_T = 8'hFF; // @[Misc.scala:222:10]
wire [7:0] _dataArb_io_in_1_bits_eccMask_T = 8'hFF; // @[DCache.scala:732:38]
wire [7:0] _dataArb_io_in_2_bits_eccMask_T = 8'hFF; // @[DCache.scala:905:36]
wire [2:0] pma_checker__r_sectored_repl_addr_T_20 = 3'h6; // @[Mux.scala:50:70]
wire [2:0] tl_out_a_bits_a_opcode = 3'h6; // @[Edges.scala:346:17]
wire [2:0] _tl_out_a_bits_a_mask_sizeOH_T = 3'h6; // @[Misc.scala:202:34]
wire [2:0] nodeOut_c_bits_c_opcode = 3'h6; // @[Edges.scala:380:17]
wire [3:0] io_ptw_hgatp_mode = 4'h0; // @[DCache.scala:101:7]
wire [3:0] io_ptw_vsatp_mode = 4'h0; // @[DCache.scala:101:7]
wire [3:0] pma_checker_io_ptw_ptbr_mode = 4'h0; // @[DCache.scala:120:32]
wire [3:0] pma_checker_io_ptw_hgatp_mode = 4'h0; // @[DCache.scala:120:32]
wire [3:0] pma_checker_io_ptw_vsatp_mode = 4'h0; // @[DCache.scala:120:32]
wire [3:0] pma_checker_satp_mode = 4'h0; // @[TLB.scala:373:17]
wire [3:0] pma_checker_real_hits_hi_hi = 4'h0; // @[package.scala:45:27]
wire [3:0] pma_checker_lo = 4'h0; // @[OneHot.scala:21:45]
wire [3:0] pma_checker_hi = 4'h0; // @[OneHot.scala:21:45]
wire [3:0] pma_checker_hi_1 = 4'h0; // @[OneHot.scala:30:18]
wire [3:0] pma_checker_lo_1 = 4'h0; // @[OneHot.scala:31:18]
wire [3:0] pma_checker__multipleHits_T_31 = 4'h0; // @[Misc.scala:182:39]
wire [3:0] pma_checker_r_superpage_repl_addr_valids = 4'h0; // @[package.scala:45:27]
wire [3:0] pma_checker_r_sectored_repl_addr_valids_lo = 4'h0; // @[package.scala:45:27]
wire [3:0] pma_checker_r_sectored_repl_addr_valids_hi = 4'h0; // @[package.scala:45:27]
wire [3:0] pma_checker_r_sectored_hit_bits_lo = 4'h0; // @[OneHot.scala:21:45]
wire [3:0] pma_checker_r_sectored_hit_bits_hi = 4'h0; // @[OneHot.scala:21:45]
wire [3:0] pma_checker_r_sectored_hit_bits_hi_1 = 4'h0; // @[OneHot.scala:30:18]
wire [3:0] pma_checker_r_sectored_hit_bits_lo_1 = 4'h0; // @[OneHot.scala:31:18]
wire [3:0] pma_checker__r_sectored_hit_bits_T_2 = 4'h0; // @[OneHot.scala:32:28]
wire [3:0] pma_checker__r_superpage_hit_bits_T = 4'h0; // @[OneHot.scala:21:45]
wire [3:0] metaArb_io_in_1_bits_way_en = 4'h0; // @[DCache.scala:135:28]
wire [3:0] s2_meta_correctable_errors = 4'h0; // @[package.scala:45:27]
wire [3:0] s2_meta_uncorrectable_errors = 4'h0; // @[package.scala:45:27]
wire [3:0] _s2_meta_error_T = 4'h0; // @[DCache.scala:362:53]
wire [3:0] _r_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _r_T_63 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _r_T_127 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _metaArb_io_in_1_bits_way_en_T_4 = 4'h0; // @[Mux.scala:50:70]
wire [3:0] _metaArb_io_in_1_bits_way_en_T_5 = 4'h0; // @[Mux.scala:50:70]
wire [3:0] _metaArb_io_in_1_bits_way_en_T_6 = 4'h0; // @[Mux.scala:50:70]
wire [3:0] _metaArb_io_in_1_bits_way_en_T_7 = 4'h0; // @[Mux.scala:50:70]
wire [3:0] _metaArb_io_in_1_bits_way_en_T_8 = 4'h0; // @[DCache.scala:452:69]
wire [3:0] _metaArb_io_in_1_bits_way_en_T_9 = 4'h0; // @[DCache.scala:452:64]
wire [3:0] _a_mask_T = 4'h0; // @[DCache.scala:582:90]
wire [3:0] _atomics_WIRE_size = 4'h0; // @[DCache.scala:587:51]
wire [3:0] _atomics_WIRE_1_size = 4'h0; // @[DCache.scala:587:38]
wire [3:0] _metaArb_io_in_3_bits_data_T_5 = 4'h0; // @[Metadata.scala:87:10]
wire [3:0] probe_bits_res_size = 4'h0; // @[DCache.scala:1202:19]
wire [2:0] pma_checker_real_hits_lo_lo = 3'h0; // @[package.scala:45:27]
wire [2:0] pma_checker_real_hits_lo_hi = 3'h0; // @[package.scala:45:27]
wire [2:0] pma_checker_real_hits_hi_lo = 3'h0; // @[package.scala:45:27]
wire [2:0] pma_checker_special_entry_data_0_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_special_entry_data_0_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_special_entry_data_0_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_0_data_0_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_0_data_0_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_0_data_0_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_1_data_0_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_1_data_0_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_1_data_0_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_2_data_0_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_2_data_0_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_2_data_0_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_3_data_0_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_3_data_0_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_3_data_0_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_waddr_1 = 3'h0; // @[TLB.scala:485:22]
wire [2:0] pma_checker_sectored_entries_0_0_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_0_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_0_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_1_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_1_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_1_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_2_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_2_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_2_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_3_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_3_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_3_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_4_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_4_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_4_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_5_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_5_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_5_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_6_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_6_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_6_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_7_data_hi_lo_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_7_data_hi_lo_hi = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_7_data_hi_hi_lo = 3'h0; // @[TLB.scala:217:24]
wire [2:0] pma_checker_state_vec_0_touch_way_sized = 3'h0; // @[package.scala:163:13]
wire [2:0] pma_checker_state_vec_0_left_subtree_state = 3'h0; // @[package.scala:163:13]
wire [2:0] pma_checker_state_vec_0_right_subtree_state = 3'h0; // @[Replacement.scala:198:38]
wire [2:0] pma_checker__state_vec_0_T_10 = 3'h0; // @[Replacement.scala:203:16]
wire [2:0] pma_checker__multipleHits_T_1 = 3'h0; // @[Misc.scala:181:37]
wire [2:0] pma_checker__multipleHits_T_10 = 3'h0; // @[Misc.scala:182:39]
wire [2:0] pma_checker__multipleHits_T_22 = 3'h0; // @[Misc.scala:181:37]
wire [2:0] pma_checker_r_sectored_repl_addr_left_subtree_state = 3'h0; // @[package.scala:163:13]
wire [2:0] pma_checker_r_sectored_repl_addr_right_subtree_state = 3'h0; // @[Replacement.scala:245:38]
wire [2:0] pma_checker__r_sectored_repl_addr_T_9 = 3'h0; // @[Replacement.scala:249:12]
wire [2:0] pma_checker__r_sectored_repl_addr_T_26 = 3'h0; // @[Mux.scala:50:70]
wire [2:0] pma_checker__r_sectored_repl_addr_T_27 = 3'h0; // @[TLB.scala:757:8]
wire [2:0] pma_checker__r_sectored_hit_bits_T_7 = 3'h0; // @[OneHot.scala:32:10]
wire [2:0] get_param = 3'h0; // @[Edges.scala:460:17]
wire [2:0] put_opcode = 3'h0; // @[Edges.scala:480:17]
wire [2:0] put_param = 3'h0; // @[Edges.scala:480:17]
wire [2:0] putpartial_param = 3'h0; // @[Edges.scala:500:17]
wire [2:0] _atomics_WIRE_opcode = 3'h0; // @[DCache.scala:587:51]
wire [2:0] _atomics_WIRE_param = 3'h0; // @[DCache.scala:587:51]
wire [2:0] _atomics_WIRE_1_opcode = 3'h0; // @[DCache.scala:587:38]
wire [2:0] _atomics_WIRE_1_param = 3'h0; // @[DCache.scala:587:38]
wire [2:0] atomics_a_1_param = 3'h0; // @[Edges.scala:534:17]
wire [2:0] atomics_a_5_param = 3'h0; // @[Edges.scala:517:17]
wire [2:0] probe_bits_res_opcode = 3'h0; // @[DCache.scala:1202:19]
wire [2:0] pma_checker__state_vec_0_T_9 = 3'h5; // @[Replacement.scala:202:12]
wire [2:0] pma_checker__state_vec_0_T_20 = 3'h5; // @[Replacement.scala:202:12]
wire [2:0] pma_checker__state_vec_0_T_21 = 3'h5; // @[Replacement.scala:206:16]
wire [2:0] pma_checker__state_reg_T_8 = 3'h5; // @[Replacement.scala:202:12]
wire [2:0] pma_checker__r_sectored_repl_addr_T_21 = 3'h5; // @[Mux.scala:50:70]
wire [2:0] tl_out_a_bits_a_mask_sizeOH = 3'h5; // @[Misc.scala:202:81]
wire [2:0] nackResponseMessage_param = 3'h5; // @[Edges.scala:416:17]
wire [2:0] dirtyReleaseMessage_opcode = 3'h5; // @[Edges.scala:433:17]
wire [2:0] pma_checker__r_sectored_repl_addr_T_22 = 3'h4; // @[Mux.scala:50:70]
wire [2:0] get_opcode = 3'h4; // @[Edges.scala:460:17]
wire [2:0] atomics_a_4_param = 3'h4; // @[Edges.scala:517:17]
wire [2:0] _tl_out_a_bits_a_mask_sizeOH_T_2 = 3'h4; // @[OneHot.scala:65:27]
wire [2:0] nackResponseMessage_opcode = 3'h4; // @[Edges.scala:416:17]
wire [2:0] cleanReleaseMessage_opcode = 3'h4; // @[Edges.scala:416:17]
wire [1:0] pma_checker__r_superpage_repl_addr_T_11 = 2'h1; // @[Mux.scala:50:70]
wire [1:0] _r_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] dataArb_io_in_0_bits_wordMask_wordMask = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _dataArb_io_in_0_bits_wordMask_T_2 = 2'h1; // @[DCache.scala:555:20]
wire [1:0] _metaArb_io_in_3_bits_data_T_6 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] pma_checker_state_vec_0_hi = 2'h2; // @[Replacement.scala:202:12]
wire [1:0] pma_checker_state_vec_0_hi_1 = 2'h2; // @[Replacement.scala:202:12]
wire [1:0] pma_checker_state_reg_hi = 2'h2; // @[Replacement.scala:202:12]
wire [1:0] pma_checker__r_superpage_repl_addr_T_10 = 2'h2; // @[Mux.scala:50:70]
wire [1:0] pma_checker__state_T = 2'h2; // @[TLB.scala:704:45]
wire [1:0] _r_T_118 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _r_T_120 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _r_T_122 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] tl_out_a_bits_a_mask_sizeOH_shiftAmount = 2'h2; // @[OneHot.scala:64:49]
wire [2:0] pma_checker__r_sectored_repl_addr_T_23 = 3'h3; // @[Mux.scala:50:70]
wire [2:0] atomics_a_opcode = 3'h3; // @[Edges.scala:534:17]
wire [2:0] atomics_a_param = 3'h3; // @[Edges.scala:534:17]
wire [2:0] atomics_a_1_opcode = 3'h3; // @[Edges.scala:534:17]
wire [2:0] atomics_a_2_opcode = 3'h3; // @[Edges.scala:534:17]
wire [2:0] atomics_a_3_opcode = 3'h3; // @[Edges.scala:534:17]
wire [2:0] atomics_a_8_param = 3'h3; // @[Edges.scala:517:17]
wire [2:0] pma_checker__r_sectored_repl_addr_T_24 = 3'h2; // @[Mux.scala:50:70]
wire [2:0] atomics_a_3_param = 3'h2; // @[Edges.scala:534:17]
wire [2:0] atomics_a_4_opcode = 3'h2; // @[Edges.scala:517:17]
wire [2:0] atomics_a_5_opcode = 3'h2; // @[Edges.scala:517:17]
wire [2:0] atomics_a_6_opcode = 3'h2; // @[Edges.scala:517:17]
wire [2:0] atomics_a_7_opcode = 3'h2; // @[Edges.scala:517:17]
wire [2:0] atomics_a_7_param = 3'h2; // @[Edges.scala:517:17]
wire [2:0] atomics_a_8_opcode = 3'h2; // @[Edges.scala:517:17]
wire [2:0] pma_checker_mpu_priv = 3'h1; // @[TLB.scala:415:27]
wire [2:0] pma_checker__r_sectored_repl_addr_T_25 = 3'h1; // @[Mux.scala:50:70]
wire [2:0] putpartial_opcode = 3'h1; // @[Edges.scala:500:17]
wire [2:0] atomics_a_2_param = 3'h1; // @[Edges.scala:534:17]
wire [2:0] atomics_a_6_param = 3'h1; // @[Edges.scala:517:17]
wire [3:0] pma_checker_state_vec_0_hi_2 = 4'h8; // @[Replacement.scala:202:12]
wire [3:0] _r_T_71 = 4'h8; // @[Metadata.scala:133:10]
wire [3:0] _r_T_135 = 4'h8; // @[Metadata.scala:133:10]
wire [11:0] pma_checker__gpa_hits_hit_mask_T_2 = 12'h0; // @[TLB.scala:606:24]
wire [11:0] pma_checker__io_resp_gpa_offset_T = 12'h0; // @[TLB.scala:658:47]
wire [26:0] pma_checker_io_ptw_req_bits_bits_addr = 27'h0; // @[DCache.scala:120:32]
wire [26:0] pma_checker__io_resp_gpa_page_T_2 = 27'h0; // @[TLB.scala:657:58]
wire [6:0] pma_checker__state_vec_0_T_22 = 7'h45; // @[Replacement.scala:202:12]
wire [38:0] pma_checker_io_sfence_bits_addr = 39'h0; // @[DCache.scala:120:32]
wire [38:0] pma_checker_io_ptw_resp_bits_gpa_bits = 39'h0; // @[DCache.scala:120:32]
wire [15:0] io_ptw_hgatp_asid = 16'h0; // @[DCache.scala:101:7]
wire [15:0] io_ptw_vsatp_asid = 16'h0; // @[DCache.scala:101:7]
wire [15:0] pma_checker_io_ptw_ptbr_asid = 16'h0; // @[DCache.scala:120:32]
wire [15:0] pma_checker_io_ptw_hgatp_asid = 16'h0; // @[DCache.scala:120:32]
wire [15:0] pma_checker_io_ptw_vsatp_asid = 16'h0; // @[DCache.scala:120:32]
wire [15:0] pma_checker_satp_asid = 16'h0; // @[TLB.scala:373:17]
wire [43:0] io_ptw_hgatp_ppn = 44'h0; // @[DCache.scala:101:7]
wire [43:0] io_ptw_vsatp_ppn = 44'h0; // @[DCache.scala:101:7]
wire [43:0] pma_checker_io_ptw_resp_bits_pte_ppn = 44'h0; // @[DCache.scala:120:32]
wire [43:0] pma_checker_io_ptw_ptbr_ppn = 44'h0; // @[DCache.scala:120:32]
wire [43:0] pma_checker_io_ptw_hgatp_ppn = 44'h0; // @[DCache.scala:120:32]
wire [43:0] pma_checker_io_ptw_vsatp_ppn = 44'h0; // @[DCache.scala:120:32]
wire [43:0] pma_checker_satp_ppn = 44'h0; // @[TLB.scala:373:17]
wire [29:0] io_ptw_hstatus_zero6 = 30'h0; // @[DCache.scala:101:7]
wire [29:0] pma_checker_io_ptw_hstatus_zero6 = 30'h0; // @[DCache.scala:120:32]
wire [8:0] io_ptw_hstatus_zero5 = 9'h0; // @[DCache.scala:101:7]
wire [8:0] pma_checker_io_ptw_hstatus_zero5 = 9'h0; // @[DCache.scala:120:32]
wire [5:0] io_ptw_hstatus_vgein = 6'h0; // @[DCache.scala:101:7]
wire [5:0] pma_checker_io_ptw_hstatus_vgein = 6'h0; // @[DCache.scala:120:32]
wire [5:0] pma_checker_real_hits_lo = 6'h0; // @[package.scala:45:27]
wire [5:0] pma_checker_special_entry_data_0_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_superpage_entries_0_data_0_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_superpage_entries_1_data_0_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_superpage_entries_2_data_0_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_superpage_entries_3_data_0_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_0_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_1_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_2_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_3_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_4_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_5_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_6_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker_sectored_entries_0_7_data_hi_lo = 6'h0; // @[TLB.scala:217:24]
wire [5:0] pma_checker__multipleHits_T = 6'h0; // @[Misc.scala:181:37]
wire [31:0] io_ptw_gstatus_isa = 32'h0; // @[DCache.scala:101:7]
wire [31:0] pma_checker_io_ptw_status_isa = 32'h0; // @[DCache.scala:120:32]
wire [31:0] pma_checker_io_ptw_gstatus_isa = 32'h0; // @[DCache.scala:120:32]
wire [31:0] _atomics_WIRE_address = 32'h0; // @[DCache.scala:587:51]
wire [31:0] _atomics_WIRE_1_address = 32'h0; // @[DCache.scala:587:38]
wire [31:0] nodeOut_c_bits_c_address = 32'h0; // @[Edges.scala:380:17]
wire [31:0] nodeOut_c_bits_c_1_address = 32'h0; // @[Edges.scala:396:17]
wire [31:0] _io_cpu_s2_xcpt_WIRE_paddr = 32'h0; // @[DCache.scala:933:74]
wire [22:0] io_ptw_gstatus_zero2 = 23'h0; // @[DCache.scala:101:7]
wire [22:0] pma_checker_io_ptw_status_zero2 = 23'h0; // @[DCache.scala:120:32]
wire [22:0] pma_checker_io_ptw_gstatus_zero2 = 23'h0; // @[DCache.scala:120:32]
wire [39:0] io_tlb_port_req_bits_vaddr = 40'h0; // @[DCache.scala:101:7]
wire [39:0] _io_cpu_s2_xcpt_WIRE_gpa = 40'h0; // @[DCache.scala:933:74]
wire [25:0] metaArb_io_in_0_bits_data = 26'h0; // @[DCache.scala:135:28]
wire [25:0] _metaArb_io_in_0_bits_data_T = 26'h0; // @[DCache.scala:1050:85]
wire [23:0] metaArb_io_in_0_bits_data_meta_1_tag = 24'h0; // @[HellaCache.scala:305:20]
wire [3:0] _r_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _r_T_65 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] _r_T_129 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] tl_out_a_bits_a_size = 4'h6; // @[Edges.scala:346:17]
wire [3:0] _release_state_T_13 = 4'h6; // @[DCache.scala:820:27]
wire [3:0] nodeOut_c_bits_c_size = 4'h6; // @[Edges.scala:380:17]
wire [3:0] nodeOut_c_bits_c_1_size = 4'h6; // @[Edges.scala:396:17]
wire [2:0] nodeOut_c_bits_c_1_opcode = 3'h7; // @[Edges.scala:396:17]
wire [32:0] _nodeOut_c_bits_legal_T_33 = 33'h80000000; // @[Parameters.scala:137:41]
wire [32:0] _nodeOut_c_bits_legal_T_34 = 33'h80000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_35 = 33'h80000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_73 = 33'h80000000; // @[Parameters.scala:137:41]
wire [32:0] _nodeOut_c_bits_legal_T_74 = 33'h80000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_75 = 33'h80000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_29 = 33'h8000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_30 = 33'h8000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_69 = 33'h8000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_70 = 33'h8000000; // @[Parameters.scala:137:46]
wire [28:0] _nodeOut_c_bits_legal_T_28 = 29'h8000000; // @[Parameters.scala:137:41]
wire [28:0] _nodeOut_c_bits_legal_T_68 = 29'h8000000; // @[Parameters.scala:137:41]
wire [32:0] _nodeOut_c_bits_legal_T_18 = 33'hC000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_19 = 33'hC000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_58 = 33'hC000000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_59 = 33'hC000000; // @[Parameters.scala:137:46]
wire [28:0] _nodeOut_c_bits_legal_T_17 = 29'hC000000; // @[Parameters.scala:137:41]
wire [28:0] _nodeOut_c_bits_legal_T_57 = 29'hC000000; // @[Parameters.scala:137:41]
wire [32:0] _nodeOut_c_bits_legal_T_13 = 33'h20000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_14 = 33'h20000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_53 = 33'h20000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_54 = 33'h20000; // @[Parameters.scala:137:46]
wire [18:0] _nodeOut_c_bits_legal_T_12 = 19'h20000; // @[Parameters.scala:137:41]
wire [18:0] _nodeOut_c_bits_legal_T_52 = 19'h20000; // @[Parameters.scala:137:41]
wire [32:0] _nodeOut_c_bits_legal_T_8 = 33'h10000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_9 = 33'h10000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_48 = 33'h10000; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_49 = 33'h10000; // @[Parameters.scala:137:46]
wire [17:0] _nodeOut_c_bits_legal_T_7 = 18'h10000; // @[Parameters.scala:137:41]
wire [17:0] _nodeOut_c_bits_legal_T_47 = 18'h10000; // @[Parameters.scala:137:41]
wire [32:0] _nodeOut_c_bits_legal_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_4 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_43 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _nodeOut_c_bits_legal_T_44 = 33'h0; // @[Parameters.scala:137:46]
wire [3:0] _r_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _metaArb_io_in_3_bits_data_T_9 = 4'hC; // @[Metadata.scala:89:10]
wire [3:0] _r_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _r_T_67 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _r_T_131 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _tl_out_a_bits_a_mask_sizeOH_T_1 = 4'h4; // @[OneHot.scala:65:12]
wire [3:0] _metaArb_io_in_3_bits_data_T_7 = 4'h4; // @[Metadata.scala:88:10]
wire [3:0] _r_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _r_T_62 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _r_T_126 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _metaArb_io_in_3_bits_data_T_3 = 4'h1; // @[Metadata.scala:86:10]
wire [4:0] _s1_data_way_T_1 = 5'h10; // @[DCache.scala:694:32]
wire [3:0] _r_T_70 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _r_T_134 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _r_T_69 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _r_T_133 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _r_T_68 = 4'hB; // @[Metadata.scala:130:10]
wire [3:0] _r_T_132 = 4'hB; // @[Metadata.scala:130:10]
wire [3:0] _r_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _r_T_66 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _r_T_130 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _r_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _r_T_64 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _r_T_128 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _r_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _r_T_61 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _r_T_125 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _r_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _r_T_60 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _r_T_124 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _r_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _r_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [13:0] pma_checker__ae_array_T_2 = 14'h0; // @[TLB.scala:583:8]
wire [13:0] pma_checker__ae_st_array_T_7 = 14'h0; // @[TLB.scala:590:8]
wire [13:0] pma_checker__ae_st_array_T_10 = 14'h0; // @[TLB.scala:591:8]
wire [13:0] pma_checker__must_alloc_array_T_3 = 14'h0; // @[TLB.scala:594:8]
wire [13:0] pma_checker__must_alloc_array_T_6 = 14'h0; // @[TLB.scala:595:8]
wire [13:0] pma_checker__must_alloc_array_T_9 = 14'h0; // @[TLB.scala:596:8]
wire [13:0] pma_checker__gf_ld_array_T_2 = 14'h0; // @[TLB.scala:600:46]
wire [13:0] pma_checker_gf_ld_array = 14'h0; // @[TLB.scala:600:24]
wire [13:0] pma_checker__gf_st_array_T_1 = 14'h0; // @[TLB.scala:601:53]
wire [13:0] pma_checker_gf_st_array = 14'h0; // @[TLB.scala:601:24]
wire [13:0] pma_checker__gf_inst_array_T = 14'h0; // @[TLB.scala:602:36]
wire [13:0] pma_checker_gf_inst_array = 14'h0; // @[TLB.scala:602:26]
wire [13:0] pma_checker_gpa_hits_need_gpa_mask = 14'h0; // @[TLB.scala:605:73]
wire [13:0] pma_checker__io_resp_gf_ld_T_1 = 14'h0; // @[TLB.scala:637:58]
wire [13:0] pma_checker__io_resp_gf_st_T_1 = 14'h0; // @[TLB.scala:638:65]
wire [13:0] pma_checker__io_resp_gf_inst_T = 14'h0; // @[TLB.scala:639:48]
wire [6:0] pma_checker_real_hits_hi = 7'h0; // @[package.scala:45:27]
wire [6:0] pma_checker__state_vec_WIRE_0 = 7'h0; // @[Replacement.scala:305:25]
wire [6:0] pma_checker__multipleHits_T_21 = 7'h0; // @[Misc.scala:182:39]
wire [12:0] pma_checker_real_hits = 13'h0; // @[package.scala:45:27]
wire [12:0] pma_checker__stage1_bypass_T = 13'h0; // @[TLB.scala:517:27]
wire [12:0] pma_checker_stage1_bypass = 13'h0; // @[TLB.scala:517:61]
wire [12:0] pma_checker__r_array_T_2 = 13'h0; // @[TLB.scala:520:74]
wire [12:0] pma_checker__hr_array_T_2 = 13'h0; // @[TLB.scala:524:60]
wire [12:0] pma_checker__gpa_hits_T = 13'h0; // @[TLB.scala:607:30]
wire [12:0] pma_checker__tlb_hit_T = 13'h0; // @[TLB.scala:611:28]
wire [12:0] pma_checker__stage1_bypass_T_2 = 13'h1FFF; // @[TLB.scala:517:68]
wire [12:0] pma_checker__stage1_bypass_T_4 = 13'h1FFF; // @[TLB.scala:517:95]
wire [12:0] pma_checker_stage2_bypass = 13'h1FFF; // @[TLB.scala:523:27]
wire [12:0] pma_checker__hr_array_T_4 = 13'h1FFF; // @[TLB.scala:524:111]
wire [12:0] pma_checker__hw_array_T_1 = 13'h1FFF; // @[TLB.scala:525:55]
wire [12:0] pma_checker__hx_array_T_1 = 13'h1FFF; // @[TLB.scala:526:55]
wire [12:0] pma_checker__gpa_hits_hit_mask_T_4 = 13'h1FFF; // @[TLB.scala:606:88]
wire [12:0] pma_checker_gpa_hits_hit_mask = 13'h1FFF; // @[TLB.scala:606:82]
wire [12:0] pma_checker__gpa_hits_T_1 = 13'h1FFF; // @[TLB.scala:607:16]
wire [12:0] pma_checker_gpa_hits = 13'h1FFF; // @[TLB.scala:607:14]
wire [13:0] pma_checker_hr_array = 14'h3FFF; // @[TLB.scala:524:21]
wire [13:0] pma_checker_hw_array = 14'h3FFF; // @[TLB.scala:525:21]
wire [13:0] pma_checker_hx_array = 14'h3FFF; // @[TLB.scala:526:21]
wire [13:0] pma_checker__must_alloc_array_T_8 = 14'h3FFF; // @[TLB.scala:596:19]
wire [13:0] pma_checker__gf_ld_array_T_1 = 14'h3FFF; // @[TLB.scala:600:50]
wire [19:0] pma_checker_refill_ppn = 20'h0; // @[TLB.scala:406:44]
wire [19:0] pma_checker_newEntry_ppn = 20'h0; // @[TLB.scala:449:24]
wire [19:0] pma_checker__ppn_T_42 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_43 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_44 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_45 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_46 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_47 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_48 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_49 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_50 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_51 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_52 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_53 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_54 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_56 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_57 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_58 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_59 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_60 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_61 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_62 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_63 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_64 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_65 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_66 = 20'h0; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_67 = 20'h0; // @[Mux.scala:30:73]
wire [30:0] pma_checker_special_entry_data_0_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_superpage_entries_0_data_0_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_superpage_entries_1_data_0_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_superpage_entries_2_data_0_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_superpage_entries_3_data_0_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_0_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_1_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_2_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_3_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_4_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_5_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_6_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [30:0] pma_checker_sectored_entries_0_7_data_hi = 31'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_special_entry_data_0_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_superpage_entries_0_data_0_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_superpage_entries_1_data_0_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_superpage_entries_2_data_0_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_superpage_entries_3_data_0_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_0_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_1_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_2_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_3_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_4_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_5_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_6_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [24:0] pma_checker_sectored_entries_0_7_data_hi_hi = 25'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_special_entry_data_0_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_superpage_entries_0_data_0_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_superpage_entries_1_data_0_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_superpage_entries_2_data_0_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_superpage_entries_3_data_0_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_0_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_1_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_2_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_3_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_4_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_5_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_6_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [21:0] pma_checker_sectored_entries_0_7_data_hi_hi_hi = 22'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_special_entry_data_0_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_superpage_entries_0_data_0_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_superpage_entries_1_data_0_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_superpage_entries_2_data_0_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_superpage_entries_3_data_0_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_0_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_1_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_2_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_3_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_4_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_5_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_6_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [20:0] pma_checker_sectored_entries_0_7_data_hi_hi_hi_hi = 21'h0; // @[TLB.scala:217:24]
wire [13:0] pma_checker_hits = 14'h2000; // @[TLB.scala:442:17]
wire [9:0] pma_checker_io_ptw_resp_bits_pte_reserved_for_future = 10'h0; // @[DCache.scala:120:32]
wire [31:0] _nodeOut_c_bits_legal_T_32 = 32'h80000000; // @[Parameters.scala:137:31]
wire [31:0] _nodeOut_c_bits_legal_T_72 = 32'h80000000; // @[Parameters.scala:137:31]
wire [27:0] _nodeOut_c_bits_legal_T_16 = 28'hC000000; // @[Parameters.scala:137:31]
wire [27:0] _nodeOut_c_bits_legal_T_56 = 28'hC000000; // @[Parameters.scala:137:31]
wire [27:0] _nodeOut_c_bits_legal_T_27 = 28'h8000000; // @[Parameters.scala:137:31]
wire [27:0] _nodeOut_c_bits_legal_T_67 = 28'h8000000; // @[Parameters.scala:137:31]
wire [17:0] _nodeOut_c_bits_legal_T_11 = 18'h20000; // @[Parameters.scala:137:31]
wire [17:0] _nodeOut_c_bits_legal_T_51 = 18'h20000; // @[Parameters.scala:137:31]
wire [16:0] _nodeOut_c_bits_legal_T_6 = 17'h10000; // @[Parameters.scala:137:31]
wire [16:0] _nodeOut_c_bits_legal_T_46 = 17'h10000; // @[Parameters.scala:137:31]
wire [41:0] pma_checker__mpu_ppn_WIRE_1 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_1 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_3 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_5 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_7 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_9 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_11 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_13 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_15 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_17 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_19 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_21 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_23 = 42'h0; // @[TLB.scala:170:77]
wire [41:0] pma_checker__entries_WIRE_25 = 42'h0; // @[TLB.scala:170:77]
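// TileLink client node (nodeOut) channel wiring: inputs are taken from the diplomatic
// auto_out_* ports, while the node's outputs are declared here and driven further below.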
wire nodeOut_a_ready = auto_out_a_ready_0; // @[DCache.scala:101:7]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire nodeOut_b_valid = auto_out_b_valid_0; // @[DCache.scala:101:7]
wire [2:0] nodeOut_b_bits_opcode = auto_out_b_bits_opcode_0; // @[DCache.scala:101:7]
wire [1:0] nodeOut_b_bits_param = auto_out_b_bits_param_0; // @[DCache.scala:101:7]
wire [3:0] nodeOut_b_bits_size = auto_out_b_bits_size_0; // @[DCache.scala:101:7]
wire nodeOut_b_bits_source = auto_out_b_bits_source_0; // @[DCache.scala:101:7]
wire [31:0] nodeOut_b_bits_address = auto_out_b_bits_address_0; // @[DCache.scala:101:7]
wire [7:0] nodeOut_b_bits_mask = auto_out_b_bits_mask_0; // @[DCache.scala:101:7]
wire [63:0] nodeOut_b_bits_data = auto_out_b_bits_data_0; // @[DCache.scala:101:7]
wire nodeOut_b_bits_corrupt = auto_out_b_bits_corrupt_0; // @[DCache.scala:101:7]
wire nodeOut_c_ready = auto_out_c_ready_0; // @[DCache.scala:101:7]
wire nodeOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_c_bits_size; // @[MixedNode.scala:542:17]
wire nodeOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_c_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[DCache.scala:101:7]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[DCache.scala:101:7]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[DCache.scala:101:7]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[DCache.scala:101:7]
wire nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[DCache.scala:101:7]
wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[DCache.scala:101:7]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[DCache.scala:101:7]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[DCache.scala:101:7]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[DCache.scala:101:7]
wire nodeOut_e_ready = auto_out_e_ready_0; // @[DCache.scala:101:7]
wire nodeOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_e_bits_sink; // @[MixedNode.scala:542:17]
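// Fanout of the CPU request handshake and address to the internal metadata/data
// arbiters (metaArb, dataArb) and related pipeline control terms.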
wire metaArb_io_in_7_valid = io_cpu_req_valid_0; // @[DCache.scala:101:7, :135:28]
wire _dataArb_io_in_3_valid_T_58 = io_cpu_req_valid_0; // @[DCache.scala:101:7, :242:46]
wire _s1_did_read_T_53 = io_cpu_req_valid_0; // @[DCache.scala:101:7, :259:75]
wire _pstore_drain_opportunistic_T_58 = io_cpu_req_valid_0; // @[DCache.scala:101:7, :502:55]
wire [39:0] metaArb_io_in_7_bits_addr = io_cpu_req_bits_addr_0; // @[DCache.scala:101:7, :135:28]
wire [7:0] s0_req_tag = io_cpu_req_bits_tag_0; // @[DCache.scala:101:7, :192:24]
wire [1:0] s0_req_dprv = io_cpu_req_bits_dprv_0; // @[DCache.scala:101:7, :192:24]
wire s0_req_dv = io_cpu_req_bits_dv_0; // @[DCache.scala:101:7, :192:24]
wire _io_cpu_s2_nack_T_5; // @[DCache.scala:445:86]
wire _io_cpu_s2_nack_cause_raw_T_3; // @[DCache.scala:574:54]
wire _io_cpu_s2_uncached_T_1; // @[DCache.scala:920:37]
wire _io_cpu_resp_valid_T_2; // @[DCache.scala:949:70]
wire [63:0] _io_cpu_resp_bits_data_T_24; // @[DCache.scala:974:41]
wire s2_read; // @[Consts.scala:89:68]
wire [63:0] _io_cpu_resp_bits_data_word_bypass_T_7; // @[AMOALU.scala:45:16]
wire [63:0] s2_data_word; // @[DCache.scala:970:80]
wire _io_cpu_replay_next_T_3; // @[DCache.scala:950:62]
wire _io_cpu_s2_xcpt_T_ma_ld; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_ma_st; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_pf_ld; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_pf_st; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_ae_ld; // @[DCache.scala:933:24]
wire _io_cpu_s2_xcpt_T_ae_st; // @[DCache.scala:933:24]
wire _io_cpu_ordered_T_8; // @[DCache.scala:929:21]
wire _io_cpu_store_pending_T_25; // @[DCache.scala:930:70]
wire io_cpu_perf_acquire_done; // @[Edges.scala:233:22]
wire io_cpu_perf_release_done; // @[Edges.scala:233:22]
wire _io_cpu_perf_grant_T; // @[DCache.scala:1078:39]
wire _io_cpu_perf_tlbMiss_T; // @[Decoupled.scala:51:35]
wire _io_cpu_perf_blocked_T_1; // @[DCache.scala:1106:23]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_10; // @[DCache.scala:1088:41]
wire _io_cpu_perf_canAcceptStoreThenRMW_T_1; // @[DCache.scala:1091:75]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_61; // @[DCache.scala:1092:40]
wire _io_cpu_perf_storeBufferEmptyAfterLoad_T_7; // @[DCache.scala:1080:44]
wire _io_cpu_perf_storeBufferEmptyAfterStore_T_10; // @[DCache.scala:1084:45]
wire _io_errors_bus_valid_T_2; // @[DCache.scala:1129:42]
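// Output-port wire declarations (auto_out_* TileLink channels and io_cpu_* response,
// exception and performance outputs); each is driven by an assign later in the module.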
wire [2:0] auto_out_a_bits_opcode_0; // @[DCache.scala:101:7]
wire [2:0] auto_out_a_bits_param_0; // @[DCache.scala:101:7]
wire [3:0] auto_out_a_bits_size_0; // @[DCache.scala:101:7]
wire auto_out_a_bits_source_0; // @[DCache.scala:101:7]
wire [31:0] auto_out_a_bits_address_0; // @[DCache.scala:101:7]
wire [7:0] auto_out_a_bits_mask_0; // @[DCache.scala:101:7]
wire [63:0] auto_out_a_bits_data_0; // @[DCache.scala:101:7]
wire auto_out_a_valid_0; // @[DCache.scala:101:7]
wire auto_out_b_ready_0; // @[DCache.scala:101:7]
wire [2:0] auto_out_c_bits_opcode_0; // @[DCache.scala:101:7]
wire [2:0] auto_out_c_bits_param_0; // @[DCache.scala:101:7]
wire [3:0] auto_out_c_bits_size_0; // @[DCache.scala:101:7]
wire auto_out_c_bits_source_0; // @[DCache.scala:101:7]
wire [31:0] auto_out_c_bits_address_0; // @[DCache.scala:101:7]
wire [63:0] auto_out_c_bits_data_0; // @[DCache.scala:101:7]
wire auto_out_c_valid_0; // @[DCache.scala:101:7]
wire auto_out_d_ready_0; // @[DCache.scala:101:7]
wire [2:0] auto_out_e_bits_sink_0; // @[DCache.scala:101:7]
wire auto_out_e_valid_0; // @[DCache.scala:101:7]
wire io_cpu_req_ready_0; // @[DCache.scala:101:7]
wire [39:0] io_cpu_resp_bits_addr_0; // @[DCache.scala:101:7]
wire [7:0] io_cpu_resp_bits_tag_0; // @[DCache.scala:101:7]
wire [4:0] io_cpu_resp_bits_cmd_0; // @[DCache.scala:101:7]
wire [1:0] io_cpu_resp_bits_size_0; // @[DCache.scala:101:7]
wire io_cpu_resp_bits_signed_0; // @[DCache.scala:101:7]
wire [1:0] io_cpu_resp_bits_dprv_0; // @[DCache.scala:101:7]
wire io_cpu_resp_bits_dv_0; // @[DCache.scala:101:7]
wire [63:0] io_cpu_resp_bits_data_0; // @[DCache.scala:101:7]
wire [7:0] io_cpu_resp_bits_mask_0; // @[DCache.scala:101:7]
wire io_cpu_resp_bits_replay_0; // @[DCache.scala:101:7]
wire io_cpu_resp_bits_has_data_0; // @[DCache.scala:101:7]
wire [63:0] io_cpu_resp_bits_data_word_bypass_0; // @[DCache.scala:101:7]
wire [63:0] io_cpu_resp_bits_data_raw_0; // @[DCache.scala:101:7]
wire [63:0] io_cpu_resp_bits_store_data_0; // @[DCache.scala:101:7]
wire io_cpu_resp_valid_0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_ma_ld_0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_ma_st_0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_pf_ld_0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_pf_st_0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_ae_ld_0; // @[DCache.scala:101:7]
wire io_cpu_s2_xcpt_ae_st_0; // @[DCache.scala:101:7]
wire io_cpu_perf_acquire_0; // @[DCache.scala:101:7]
wire io_cpu_perf_release_0; // @[DCache.scala:101:7]
wire io_cpu_perf_grant_0; // @[DCache.scala:101:7]
wire io_cpu_perf_tlbMiss_0; // @[DCache.scala:101:7]
wire io_cpu_perf_blocked_0; // @[DCache.scala:101:7]
wire io_cpu_perf_canAcceptStoreThenLoad_0; // @[DCache.scala:101:7]
wire io_cpu_perf_canAcceptStoreThenRMW_0; // @[DCache.scala:101:7]
wire io_cpu_perf_canAcceptLoadThenLoad_0; // @[DCache.scala:101:7]
wire io_cpu_perf_storeBufferEmptyAfterLoad_0; // @[DCache.scala:101:7]
wire io_cpu_perf_storeBufferEmptyAfterStore_0; // @[DCache.scala:101:7]
wire io_cpu_s2_nack_0; // @[DCache.scala:101:7]
wire io_cpu_s2_nack_cause_raw_0; // @[DCache.scala:101:7]
wire io_cpu_s2_uncached_0; // @[DCache.scala:101:7]
wire [31:0] io_cpu_s2_paddr_0; // @[DCache.scala:101:7]
wire io_cpu_replay_next_0; // @[DCache.scala:101:7]
wire [39:0] io_cpu_s2_gpa_0; // @[DCache.scala:101:7]
wire io_cpu_ordered_0; // @[DCache.scala:101:7]
wire io_cpu_store_pending_0; // @[DCache.scala:101:7]
wire [26:0] io_ptw_req_bits_bits_addr_0; // @[DCache.scala:101:7]
wire io_ptw_req_bits_bits_need_gpa_0; // @[DCache.scala:101:7]
wire io_ptw_req_valid_0; // @[DCache.scala:101:7]
wire io_errors_bus_valid; // @[DCache.scala:101:7]
wire [31:0] io_errors_bus_bits; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_pf_ld; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_pf_st; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_pf_inst; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_ae_ld; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_ae_st; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_ae_inst; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_ma_ld; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_ma_st; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_miss; // @[DCache.scala:101:7]
wire [31:0] io_tlb_port_s1_resp_paddr; // @[DCache.scala:101:7]
wire [39:0] io_tlb_port_s1_resp_gpa; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_cacheable; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_must_alloc; // @[DCache.scala:101:7]
wire io_tlb_port_s1_resp_prefetchable; // @[DCache.scala:101:7]
wire [1:0] io_tlb_port_s1_resp_size; // @[DCache.scala:101:7]
wire [4:0] io_tlb_port_s1_resp_cmd; // @[DCache.scala:101:7]
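// The A channel appears to pass through a Queue (Decoupled.scala:356): its deq side is
// renamed nodeOut_a_deq_* here and forwarded onto the auto_out_a_* ports.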
wire nodeOut_a_deq_ready = nodeOut_a_ready; // @[Decoupled.scala:356:21]
wire nodeOut_a_deq_valid; // @[Decoupled.scala:356:21]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[DCache.scala:101:7]
wire [2:0] nodeOut_a_deq_bits_opcode; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[DCache.scala:101:7]
wire [2:0] nodeOut_a_deq_bits_param; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[DCache.scala:101:7]
wire [3:0] nodeOut_a_deq_bits_size; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[DCache.scala:101:7]
wire nodeOut_a_deq_bits_source; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[DCache.scala:101:7]
wire [31:0] nodeOut_a_deq_bits_address; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[DCache.scala:101:7]
wire [7:0] nodeOut_a_deq_bits_mask; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[DCache.scala:101:7]
wire [63:0] nodeOut_a_deq_bits_data; // @[Decoupled.scala:356:21]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[DCache.scala:101:7]
wire _nodeOut_b_ready_T_4; // @[DCache.scala:770:44]
assign auto_out_b_ready_0 = nodeOut_b_ready; // @[DCache.scala:101:7]
assign auto_out_c_valid_0 = nodeOut_c_valid; // @[DCache.scala:101:7]
assign auto_out_c_bits_opcode_0 = nodeOut_c_bits_opcode; // @[DCache.scala:101:7]
assign auto_out_c_bits_param_0 = nodeOut_c_bits_param; // @[DCache.scala:101:7]
assign auto_out_c_bits_size_0 = nodeOut_c_bits_size; // @[DCache.scala:101:7]
assign auto_out_c_bits_source_0 = nodeOut_c_bits_source; // @[DCache.scala:101:7]
assign auto_out_c_bits_address_0 = nodeOut_c_bits_address; // @[DCache.scala:101:7]
wire [63:0] s2_data_corrected; // @[package.scala:45:27]
assign auto_out_c_bits_data_0 = nodeOut_c_bits_data; // @[DCache.scala:101:7]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[DCache.scala:101:7]
wire uncachedRespIdxOH_shiftAmount = nodeOut_d_bits_source; // @[OneHot.scala:64:49]
wire [2:0] nodeOut_e_bits_e_sink = nodeOut_d_bits_sink; // @[Edges.scala:451:17]
wire [63:0] s1_uncached_data_word = nodeOut_d_bits_data; // @[package.scala:211:50]
wire _tl_d_data_encoded_T_10 = nodeOut_d_bits_corrupt; // @[DCache.scala:663:77]
assign auto_out_e_valid_0 = nodeOut_e_valid; // @[DCache.scala:101:7]
assign auto_out_e_bits_sink_0 = nodeOut_e_bits_sink; // @[DCache.scala:101:7]
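// pma_checker: a TLB instantiated at DCache.scala:120 and used for PMA/permission checks
// on the TLB port; most of its translation state appears constant-folded to zero above.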
wire [1:0] pma_checker_io_resp_size = pma_checker_io_req_bits_size; // @[DCache.scala:120:32]
wire [4:0] pma_checker_io_resp_cmd = pma_checker_io_req_bits_cmd; // @[DCache.scala:120:32]
wire [31:0] pma_checker__io_resp_paddr_T_1; // @[TLB.scala:652:23]
wire [39:0] pma_checker__io_resp_gpa_T; // @[TLB.scala:659:8]
wire pma_checker__io_resp_pf_ld_T_3; // @[TLB.scala:633:41]
wire pma_checker__io_resp_pf_st_T_3; // @[TLB.scala:634:48]
wire pma_checker__io_resp_pf_inst_T_2; // @[TLB.scala:635:29]
wire pma_checker__io_resp_ae_ld_T_1; // @[TLB.scala:641:41]
wire pma_checker__io_resp_ae_st_T_1; // @[TLB.scala:642:41]
wire pma_checker__io_resp_ae_inst_T_2; // @[TLB.scala:643:41]
wire pma_checker__io_resp_ma_ld_T; // @[TLB.scala:645:31]
wire pma_checker__io_resp_ma_st_T; // @[TLB.scala:646:31]
wire pma_checker__io_resp_cacheable_T_1; // @[TLB.scala:648:41]
wire pma_checker__io_resp_must_alloc_T_1; // @[TLB.scala:649:51]
wire pma_checker__io_resp_prefetchable_T_2; // @[TLB.scala:650:59]
wire [39:0] pma_checker_io_req_bits_vaddr; // @[DCache.scala:120:32]
wire [1:0] pma_checker_io_req_bits_prv; // @[DCache.scala:120:32]
wire pma_checker_io_req_bits_v; // @[DCache.scala:120:32]
wire pma_checker_io_resp_pf_ld; // @[DCache.scala:120:32]
wire pma_checker_io_resp_pf_st; // @[DCache.scala:120:32]
wire pma_checker_io_resp_pf_inst; // @[DCache.scala:120:32]
wire pma_checker_io_resp_ae_ld; // @[DCache.scala:120:32]
wire pma_checker_io_resp_ae_st; // @[DCache.scala:120:32]
wire pma_checker_io_resp_ae_inst; // @[DCache.scala:120:32]
wire pma_checker_io_resp_ma_ld; // @[DCache.scala:120:32]
wire pma_checker_io_resp_ma_st; // @[DCache.scala:120:32]
wire [31:0] pma_checker_io_resp_paddr; // @[DCache.scala:120:32]
wire [39:0] pma_checker_io_resp_gpa; // @[DCache.scala:120:32]
wire pma_checker_io_resp_cacheable; // @[DCache.scala:120:32]
wire pma_checker_io_resp_must_alloc; // @[DCache.scala:120:32]
wire pma_checker_io_resp_prefetchable; // @[DCache.scala:120:32]
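// Virtual page number (vaddr[38:12]) extracted once and fanned out to the pma_checker's
// sector/superpage hit comparators and PPN muxes below.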
wire [26:0] pma_checker_vpn = pma_checker_io_req_bits_vaddr[38:12]; // @[TLB.scala:335:30]
wire [26:0] pma_checker__mpu_ppn_T_24 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__mpu_ppn_T_28 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__sector_hits_T_3 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_11 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_19 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_27 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_35 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_43 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_51 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__sector_hits_T_59 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__superpage_hits_T = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_5 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_10 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_14 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_19 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_24 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_28 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_33 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_38 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_42 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_47 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__superpage_hits_T_52 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_6 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_12 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_18 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_24 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_30 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_36 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_42 = pma_checker_vpn; // @[TLB.scala:174:61, :335:30]
wire [26:0] pma_checker__hitsVec_T_48 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_53 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_58 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_63 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_68 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_73 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_78 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_83 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_88 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_93 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_98 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_103 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_108 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_113 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__hitsVec_T_118 = pma_checker_vpn; // @[TLB.scala:183:52, :335:30]
wire [26:0] pma_checker__ppn_T_5 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__ppn_T_13 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__ppn_T_21 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__ppn_T_29 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__ppn_T_33 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
wire [26:0] pma_checker__ppn_T_37 = pma_checker_vpn; // @[TLB.scala:198:28, :335:30]
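  // Privilege decode for the PMA check: priv_s is bit 0 of the requested
  // privilege level, and priv_uses_vm is set only when prv[1] is clear
  // (U/S mode; M-mode bypasses address translation).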
wire pma_checker_priv_s = pma_checker_io_req_bits_prv[0]; // @[TLB.scala:370:20]
wire pma_checker_priv_uses_vm = ~(pma_checker_io_req_bits_prv[1]); // @[TLB.scala:372:27]
wire [19:0] pma_checker__mpu_ppn_T_23; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_22; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_21; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_20; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_19; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_18; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_17; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_16; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_15; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_14; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_13; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_12; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_11; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_10; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_9; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_8; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_7; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_6; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_5; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_4; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_3; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_2; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_T_1; // @[TLB.scala:170:77]
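  // The assigns below unpack the flattened 42-bit entry-data word
  // (mpu_ppn_WIRE_1) into named TLBEntryData-style fields, per the bit
  // slices used here:
  //   [0]  fragmented_superpage  [1]  c         [2]  eff       [3]  paa
  //   [4]  pal                   [5]  ppp       [6]  pr        [7]  px
  //   [8]  pw                    [9]  hr        [10] hx        [11] hw
  //   [12] sr                    [13] sx        [14] sw        [15] gf
  //   [16] pf                    [17] ae_stage2 [18] ae_final  [19] ae_ptw
  //   [20] g                     [21] u         [41:22] ppn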
assign pma_checker__mpu_ppn_T_1 = pma_checker__mpu_ppn_WIRE_1[0]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_fragmented_superpage = pma_checker__mpu_ppn_T_1; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_2 = pma_checker__mpu_ppn_WIRE_1[1]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_c = pma_checker__mpu_ppn_T_2; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_3 = pma_checker__mpu_ppn_WIRE_1[2]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_eff = pma_checker__mpu_ppn_T_3; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_4 = pma_checker__mpu_ppn_WIRE_1[3]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_paa = pma_checker__mpu_ppn_T_4; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_5 = pma_checker__mpu_ppn_WIRE_1[4]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_pal = pma_checker__mpu_ppn_T_5; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_6 = pma_checker__mpu_ppn_WIRE_1[5]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_ppp = pma_checker__mpu_ppn_T_6; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_7 = pma_checker__mpu_ppn_WIRE_1[6]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_pr = pma_checker__mpu_ppn_T_7; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_8 = pma_checker__mpu_ppn_WIRE_1[7]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_px = pma_checker__mpu_ppn_T_8; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_9 = pma_checker__mpu_ppn_WIRE_1[8]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_pw = pma_checker__mpu_ppn_T_9; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_10 = pma_checker__mpu_ppn_WIRE_1[9]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_hr = pma_checker__mpu_ppn_T_10; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_11 = pma_checker__mpu_ppn_WIRE_1[10]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_hx = pma_checker__mpu_ppn_T_11; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_12 = pma_checker__mpu_ppn_WIRE_1[11]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_hw = pma_checker__mpu_ppn_T_12; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_13 = pma_checker__mpu_ppn_WIRE_1[12]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_sr = pma_checker__mpu_ppn_T_13; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_14 = pma_checker__mpu_ppn_WIRE_1[13]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_sx = pma_checker__mpu_ppn_T_14; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_15 = pma_checker__mpu_ppn_WIRE_1[14]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_sw = pma_checker__mpu_ppn_T_15; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_16 = pma_checker__mpu_ppn_WIRE_1[15]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_gf = pma_checker__mpu_ppn_T_16; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_17 = pma_checker__mpu_ppn_WIRE_1[16]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_pf = pma_checker__mpu_ppn_T_17; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_18 = pma_checker__mpu_ppn_WIRE_1[17]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_ae_stage2 = pma_checker__mpu_ppn_T_18; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_19 = pma_checker__mpu_ppn_WIRE_1[18]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_ae_final = pma_checker__mpu_ppn_T_19; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_20 = pma_checker__mpu_ppn_WIRE_1[19]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_ae_ptw = pma_checker__mpu_ppn_T_20; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_21 = pma_checker__mpu_ppn_WIRE_1[20]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_g = pma_checker__mpu_ppn_T_21; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_22 = pma_checker__mpu_ppn_WIRE_1[21]; // @[TLB.scala:170:77]
wire pma_checker__mpu_ppn_WIRE_u = pma_checker__mpu_ppn_T_22; // @[TLB.scala:170:77]
assign pma_checker__mpu_ppn_T_23 = pma_checker__mpu_ppn_WIRE_1[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__mpu_ppn_WIRE_ppn = pma_checker__mpu_ppn_T_23; // @[TLB.scala:170:77]
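  // PPN selection for the PMA lookup: mpu_ppn_res takes the top superpage
  // bits of the barrier output, _T_25.._T_31 splice the mid/low VPN bits
  // into superpage translations, and _T_32/_T_33 take vaddr[39:12]
  // directly; in this instance mpu_ppn resolves to the untranslated
  // vaddr[39:12].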
wire [1:0] pma_checker_mpu_ppn_res = _pma_checker_mpu_ppn_barrier_io_y_ppn[19:18]; // @[package.scala:267:25]
wire [26:0] pma_checker__mpu_ppn_T_25 = {pma_checker__mpu_ppn_T_24[26:20], pma_checker__mpu_ppn_T_24[19:0] | _pma_checker_mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__mpu_ppn_T_26 = pma_checker__mpu_ppn_T_25[17:9]; // @[TLB.scala:198:{47,58}]
wire [10:0] pma_checker__mpu_ppn_T_27 = {pma_checker_mpu_ppn_res, pma_checker__mpu_ppn_T_26}; // @[TLB.scala:195:26, :198:{18,58}]
wire [26:0] pma_checker__mpu_ppn_T_29 = {pma_checker__mpu_ppn_T_28[26:20], pma_checker__mpu_ppn_T_28[19:0] | _pma_checker_mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__mpu_ppn_T_30 = pma_checker__mpu_ppn_T_29[8:0]; // @[TLB.scala:198:{47,58}]
wire [19:0] pma_checker__mpu_ppn_T_31 = {pma_checker__mpu_ppn_T_27, pma_checker__mpu_ppn_T_30}; // @[TLB.scala:198:{18,58}]
wire [27:0] pma_checker__mpu_ppn_T_32 = pma_checker_io_req_bits_vaddr[39:12]; // @[TLB.scala:413:146]
wire [27:0] pma_checker__mpu_ppn_T_33 = pma_checker__mpu_ppn_T_32; // @[TLB.scala:413:{20,146}]
wire [27:0] pma_checker_mpu_ppn = pma_checker__mpu_ppn_T_33; // @[TLB.scala:412:20, :413:20]
wire [11:0] pma_checker__mpu_physaddr_T = pma_checker_io_req_bits_vaddr[11:0]; // @[TLB.scala:414:52]
wire [11:0] pma_checker__io_resp_paddr_T = pma_checker_io_req_bits_vaddr[11:0]; // @[TLB.scala:414:52, :652:46]
wire [11:0] pma_checker__io_resp_gpa_offset_T_1 = pma_checker_io_req_bits_vaddr[11:0]; // @[TLB.scala:414:52, :658:82]
wire [39:0] pma_checker_mpu_physaddr = {pma_checker_mpu_ppn, pma_checker__mpu_physaddr_T}; // @[TLB.scala:412:20, :414:{25,52}]
wire [39:0] pma_checker__homogeneous_T = pma_checker_mpu_physaddr; // @[TLB.scala:414:25]
wire [39:0] pma_checker__homogeneous_T_79 = pma_checker_mpu_physaddr; // @[TLB.scala:414:25]
wire [39:0] pma_checker__deny_access_to_debug_T_1 = pma_checker_mpu_physaddr; // @[TLB.scala:414:25]
wire [2:0] pma_checker__mpu_priv_T_2 = {1'h0, pma_checker_io_req_bits_prv}; // @[TLB.scala:415:103]
wire pma_checker_cacheable; // @[TLB.scala:425:41]
wire pma_checker_newEntry_c = pma_checker_cacheable; // @[TLB.scala:425:41, :449:24]
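  // PMA region-membership tests: each group below XORs mpu_physaddr
  // against a region base, masks it to the region size, and compares the
  // result with zero (Parameters.scala address containment). Their OR
  // forms the 'homogeneous' flag used by TLBPermissions.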
wire [40:0] pma_checker__homogeneous_T_1 = {1'h0, pma_checker__homogeneous_T}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_2 = pma_checker__homogeneous_T_1 & 41'h1FFFFFFE000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_3 = pma_checker__homogeneous_T_2; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_4 = pma_checker__homogeneous_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_60 = pma_checker__homogeneous_T_4; // @[TLBPermissions.scala:101:65]
wire [39:0] _GEN = {pma_checker_mpu_physaddr[39:14], pma_checker_mpu_physaddr[13:0] ^ 14'h3000}; // @[TLB.scala:414:25]
wire [39:0] pma_checker__homogeneous_T_5; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_5 = _GEN; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_84; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_84 = _GEN; // @[Parameters.scala:137:31]
wire [40:0] pma_checker__homogeneous_T_6 = {1'h0, pma_checker__homogeneous_T_5}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_7 = pma_checker__homogeneous_T_6 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_8 = pma_checker__homogeneous_T_7; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_9 = pma_checker__homogeneous_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_0 = {pma_checker_mpu_physaddr[39:17], pma_checker_mpu_physaddr[16:0] ^ 17'h10000}; // @[TLB.scala:414:25]
wire [39:0] pma_checker__homogeneous_T_10; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_10 = _GEN_0; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_72; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_72 = _GEN_0; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_89; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_89 = _GEN_0; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_121; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_121 = _GEN_0; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_128; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_128 = _GEN_0; // @[Parameters.scala:137:31]
wire [40:0] pma_checker__homogeneous_T_11 = {1'h0, pma_checker__homogeneous_T_10}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_12 = pma_checker__homogeneous_T_11 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_13 = pma_checker__homogeneous_T_12; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_14 = pma_checker__homogeneous_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_15 = {pma_checker_mpu_physaddr[39:18], pma_checker_mpu_physaddr[17:0] ^ 18'h20000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_16 = {1'h0, pma_checker__homogeneous_T_15}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_17 = pma_checker__homogeneous_T_16 & 41'h1FFFFFFC000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_18 = pma_checker__homogeneous_T_17; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_19 = pma_checker__homogeneous_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_20 = {pma_checker_mpu_physaddr[39:18], pma_checker_mpu_physaddr[17:0] ^ 18'h24000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_21 = {1'h0, pma_checker__homogeneous_T_20}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_22 = pma_checker__homogeneous_T_21 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_23 = pma_checker__homogeneous_T_22; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_24 = pma_checker__homogeneous_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_25 = {pma_checker_mpu_physaddr[39:21], pma_checker_mpu_physaddr[20:0] ^ 21'h100000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_26 = {1'h0, pma_checker__homogeneous_T_25}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_27 = pma_checker__homogeneous_T_26 & 41'h1FFFFFEF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_28 = pma_checker__homogeneous_T_27; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_29 = pma_checker__homogeneous_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_30 = {pma_checker_mpu_physaddr[39:26], pma_checker_mpu_physaddr[25:0] ^ 26'h2000000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_31 = {1'h0, pma_checker__homogeneous_T_30}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_32 = pma_checker__homogeneous_T_31 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_33 = pma_checker__homogeneous_T_32; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_34 = pma_checker__homogeneous_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_35 = {pma_checker_mpu_physaddr[39:26], pma_checker_mpu_physaddr[25:0] ^ 26'h2010000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_36 = {1'h0, pma_checker__homogeneous_T_35}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_37 = pma_checker__homogeneous_T_36 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_38 = pma_checker__homogeneous_T_37; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_39 = pma_checker__homogeneous_T_38 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_1 = {pma_checker_mpu_physaddr[39:28], pma_checker_mpu_physaddr[27:0] ^ 28'h8000000}; // @[TLB.scala:414:25]
wire [39:0] pma_checker__homogeneous_T_40; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_40 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_94; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_94 = _GEN_1; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_109; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_109 = _GEN_1; // @[Parameters.scala:137:31]
wire [40:0] pma_checker__homogeneous_T_41 = {1'h0, pma_checker__homogeneous_T_40}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_42 = pma_checker__homogeneous_T_41 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_43 = pma_checker__homogeneous_T_42; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_44 = pma_checker__homogeneous_T_43 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_45 = {pma_checker_mpu_physaddr[39:28], pma_checker_mpu_physaddr[27:0] ^ 28'hC000000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_46 = {1'h0, pma_checker__homogeneous_T_45}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_47 = pma_checker__homogeneous_T_46 & 41'h1FFFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_48 = pma_checker__homogeneous_T_47; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_49 = pma_checker__homogeneous_T_48 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] pma_checker__homogeneous_T_50 = {pma_checker_mpu_physaddr[39:29], pma_checker_mpu_physaddr[28:0] ^ 29'h10020000}; // @[TLB.scala:414:25]
wire [40:0] pma_checker__homogeneous_T_51 = {1'h0, pma_checker__homogeneous_T_50}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_52 = pma_checker__homogeneous_T_51 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_53 = pma_checker__homogeneous_T_52; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_54 = pma_checker__homogeneous_T_53 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_2 = {pma_checker_mpu_physaddr[39:32], pma_checker_mpu_physaddr[31:0] ^ 32'h80000000}; // @[TLB.scala:414:25, :417:15]
wire [39:0] pma_checker__homogeneous_T_55; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_55 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_99; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_99 = _GEN_2; // @[Parameters.scala:137:31]
wire [39:0] pma_checker__homogeneous_T_114; // @[Parameters.scala:137:31]
assign pma_checker__homogeneous_T_114 = _GEN_2; // @[Parameters.scala:137:31]
wire [40:0] pma_checker__homogeneous_T_56 = {1'h0, pma_checker__homogeneous_T_55}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_57 = pma_checker__homogeneous_T_56 & 41'h1FFF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_58 = pma_checker__homogeneous_T_57; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_59 = pma_checker__homogeneous_T_58 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_61 = pma_checker__homogeneous_T_60 | pma_checker__homogeneous_T_9; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_62 = pma_checker__homogeneous_T_61 | pma_checker__homogeneous_T_14; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_63 = pma_checker__homogeneous_T_62 | pma_checker__homogeneous_T_19; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_64 = pma_checker__homogeneous_T_63 | pma_checker__homogeneous_T_24; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_65 = pma_checker__homogeneous_T_64 | pma_checker__homogeneous_T_29; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_66 = pma_checker__homogeneous_T_65 | pma_checker__homogeneous_T_34; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_67 = pma_checker__homogeneous_T_66 | pma_checker__homogeneous_T_39; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_68 = pma_checker__homogeneous_T_67 | pma_checker__homogeneous_T_44; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_69 = pma_checker__homogeneous_T_68 | pma_checker__homogeneous_T_49; // @[TLBPermissions.scala:101:65]
wire pma_checker__homogeneous_T_70 = pma_checker__homogeneous_T_69 | pma_checker__homogeneous_T_54; // @[TLBPermissions.scala:101:65]
wire pma_checker_homogeneous = pma_checker__homogeneous_T_70 | pma_checker__homogeneous_T_59; // @[TLBPermissions.scala:101:65]
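  // The remaining _homogeneous_T_72.._T_134 terms apply further region
  // masks used by TLBPermissions (see the :85/:87 annotations) to qualify
  // individual PMA properties for this address.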
wire [40:0] pma_checker__homogeneous_T_73 = {1'h0, pma_checker__homogeneous_T_72}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_74 = pma_checker__homogeneous_T_73 & 41'h8A130000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_75 = pma_checker__homogeneous_T_74; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_76 = pma_checker__homogeneous_T_75 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_77 = pma_checker__homogeneous_T_76; // @[TLBPermissions.scala:87:66]
wire pma_checker__homogeneous_T_78 = ~pma_checker__homogeneous_T_77; // @[TLBPermissions.scala:87:{22,66}]
wire [40:0] pma_checker__homogeneous_T_80 = {1'h0, pma_checker__homogeneous_T_79}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_81 = pma_checker__homogeneous_T_80 & 41'hFFFF3000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_82 = pma_checker__homogeneous_T_81; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_83 = pma_checker__homogeneous_T_82 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_104 = pma_checker__homogeneous_T_83; // @[TLBPermissions.scala:85:66]
wire [40:0] pma_checker__homogeneous_T_85 = {1'h0, pma_checker__homogeneous_T_84}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_86 = pma_checker__homogeneous_T_85 & 41'hFFFF3000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_87 = pma_checker__homogeneous_T_86; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_88 = pma_checker__homogeneous_T_87 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] pma_checker__homogeneous_T_90 = {1'h0, pma_checker__homogeneous_T_89}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_91 = pma_checker__homogeneous_T_90 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_92 = pma_checker__homogeneous_T_91; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_93 = pma_checker__homogeneous_T_92 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] pma_checker__homogeneous_T_95 = {1'h0, pma_checker__homogeneous_T_94}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_96 = pma_checker__homogeneous_T_95 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_97 = pma_checker__homogeneous_T_96; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_98 = pma_checker__homogeneous_T_97 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] pma_checker__homogeneous_T_100 = {1'h0, pma_checker__homogeneous_T_99}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_101 = pma_checker__homogeneous_T_100 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_102 = pma_checker__homogeneous_T_101; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_103 = pma_checker__homogeneous_T_102 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_105 = pma_checker__homogeneous_T_104 | pma_checker__homogeneous_T_88; // @[TLBPermissions.scala:85:66]
wire pma_checker__homogeneous_T_106 = pma_checker__homogeneous_T_105 | pma_checker__homogeneous_T_93; // @[TLBPermissions.scala:85:66]
wire pma_checker__homogeneous_T_107 = pma_checker__homogeneous_T_106 | pma_checker__homogeneous_T_98; // @[TLBPermissions.scala:85:66]
wire pma_checker__homogeneous_T_108 = pma_checker__homogeneous_T_107 | pma_checker__homogeneous_T_103; // @[TLBPermissions.scala:85:66]
wire [40:0] pma_checker__homogeneous_T_110 = {1'h0, pma_checker__homogeneous_T_109}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_111 = pma_checker__homogeneous_T_110 & 41'h8E020000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_112 = pma_checker__homogeneous_T_111; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_113 = pma_checker__homogeneous_T_112 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_119 = pma_checker__homogeneous_T_113; // @[TLBPermissions.scala:85:66]
wire [40:0] pma_checker__homogeneous_T_115 = {1'h0, pma_checker__homogeneous_T_114}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_116 = pma_checker__homogeneous_T_115 & 41'h80000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_117 = pma_checker__homogeneous_T_116; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_118 = pma_checker__homogeneous_T_117 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_120 = pma_checker__homogeneous_T_119 | pma_checker__homogeneous_T_118; // @[TLBPermissions.scala:85:66]
wire [40:0] pma_checker__homogeneous_T_122 = {1'h0, pma_checker__homogeneous_T_121}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_123 = pma_checker__homogeneous_T_122 & 41'h8A130000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_124 = pma_checker__homogeneous_T_123; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_125 = pma_checker__homogeneous_T_124 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_126 = pma_checker__homogeneous_T_125; // @[TLBPermissions.scala:87:66]
wire pma_checker__homogeneous_T_127 = ~pma_checker__homogeneous_T_126; // @[TLBPermissions.scala:87:{22,66}]
wire [40:0] pma_checker__homogeneous_T_129 = {1'h0, pma_checker__homogeneous_T_128}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__homogeneous_T_130 = pma_checker__homogeneous_T_129 & 41'h8A130000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__homogeneous_T_131 = pma_checker__homogeneous_T_130; // @[Parameters.scala:137:46]
wire pma_checker__homogeneous_T_132 = pma_checker__homogeneous_T_131 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker__homogeneous_T_133 = pma_checker__homogeneous_T_132; // @[TLBPermissions.scala:87:66]
wire pma_checker__homogeneous_T_134 = ~pma_checker__homogeneous_T_133; // @[TLBPermissions.scala:87:{22,66}]
wire [40:0] pma_checker__deny_access_to_debug_T_2 = {1'h0, pma_checker__deny_access_to_debug_T_1}; // @[Parameters.scala:137:{31,41}]
wire [40:0] pma_checker__deny_access_to_debug_T_3 = pma_checker__deny_access_to_debug_T_2 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}]
wire [40:0] pma_checker__deny_access_to_debug_T_4 = pma_checker__deny_access_to_debug_T_3; // @[Parameters.scala:137:46]
wire pma_checker__deny_access_to_debug_T_5 = pma_checker__deny_access_to_debug_T_4 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire pma_checker_deny_access_to_debug = pma_checker__deny_access_to_debug_T_5; // @[TLB.scala:428:50]
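  // prot_r/w/x gate the PMA responder outputs (pma_io_resp_{r,w,x}) with
  // the debug-region exclusion computed above; they feed the pr/px/pw bits
  // of newEntry.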
wire pma_checker__prot_r_T = ~pma_checker_deny_access_to_debug; // @[TLB.scala:428:50, :429:33]
wire pma_checker__prot_r_T_1 = _pma_checker_pma_io_resp_r & pma_checker__prot_r_T; // @[TLB.scala:422:19, :429:{30,33}]
wire pma_checker_prot_r = pma_checker__prot_r_T_1; // @[TLB.scala:429:{30,55}]
wire pma_checker_newEntry_pr = pma_checker_prot_r; // @[TLB.scala:429:55, :449:24]
wire pma_checker__prot_w_T = ~pma_checker_deny_access_to_debug; // @[TLB.scala:428:50, :429:33, :430:33]
wire pma_checker__prot_w_T_1 = _pma_checker_pma_io_resp_w & pma_checker__prot_w_T; // @[TLB.scala:422:19, :430:{30,33}]
wire pma_checker_prot_w = pma_checker__prot_w_T_1; // @[TLB.scala:430:{30,55}]
wire pma_checker_newEntry_pw = pma_checker_prot_w; // @[TLB.scala:430:55, :449:24]
wire pma_checker__prot_x_T = ~pma_checker_deny_access_to_debug; // @[TLB.scala:428:50, :429:33, :434:33]
wire pma_checker__prot_x_T_1 = _pma_checker_pma_io_resp_x & pma_checker__prot_x_T; // @[TLB.scala:422:19, :434:{30,33}]
wire pma_checker_prot_x = pma_checker__prot_x_T_1; // @[TLB.scala:434:{30,55}]
wire pma_checker_newEntry_px = pma_checker_prot_x; // @[TLB.scala:434:55, :449:24]
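  // Tag comparisons for the TLB lookup: sectored entries compare VPN bits
  // [26:2] against the stored tag (TLB.scala:174), while superpage entries
  // compare the VPN one 9-bit level at a time ([26:18], [17:9], [8:0]) with
  // per-level 'ignore' bits for larger page sizes (TLB.scala:182-183).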
wire [24:0] pma_checker__sector_hits_T_4 = pma_checker__sector_hits_T_3[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_5 = pma_checker__sector_hits_T_4 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_7 = pma_checker__sector_hits_T_5 & pma_checker__sector_hits_T_6; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_12 = pma_checker__sector_hits_T_11[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_13 = pma_checker__sector_hits_T_12 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_15 = pma_checker__sector_hits_T_13 & pma_checker__sector_hits_T_14; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_20 = pma_checker__sector_hits_T_19[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_21 = pma_checker__sector_hits_T_20 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_23 = pma_checker__sector_hits_T_21 & pma_checker__sector_hits_T_22; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_28 = pma_checker__sector_hits_T_27[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_29 = pma_checker__sector_hits_T_28 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_31 = pma_checker__sector_hits_T_29 & pma_checker__sector_hits_T_30; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_36 = pma_checker__sector_hits_T_35[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_37 = pma_checker__sector_hits_T_36 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_39 = pma_checker__sector_hits_T_37 & pma_checker__sector_hits_T_38; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_44 = pma_checker__sector_hits_T_43[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_45 = pma_checker__sector_hits_T_44 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_47 = pma_checker__sector_hits_T_45 & pma_checker__sector_hits_T_46; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_52 = pma_checker__sector_hits_T_51[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_53 = pma_checker__sector_hits_T_52 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_55 = pma_checker__sector_hits_T_53 & pma_checker__sector_hits_T_54; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__sector_hits_T_60 = pma_checker__sector_hits_T_59[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__sector_hits_T_61 = pma_checker__sector_hits_T_60 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__sector_hits_T_63 = pma_checker__sector_hits_T_61 & pma_checker__sector_hits_T_62; // @[TLB.scala:174:{86,95,105}]
wire [8:0] pma_checker__superpage_hits_T_1 = pma_checker__superpage_hits_T[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_2 = pma_checker__superpage_hits_T_1 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_3 = pma_checker__superpage_hits_T_2; // @[TLB.scala:183:{40,79}]
wire pma_checker_superpage_hits_ignore_1 = pma_checker__superpage_hits_ignore_T_1; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__superpage_hits_T_6 = pma_checker__superpage_hits_T_5[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_7 = pma_checker__superpage_hits_T_6 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_8 = pma_checker_superpage_hits_ignore_1 | pma_checker__superpage_hits_T_7; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__superpage_hits_T_11 = pma_checker__superpage_hits_T_10[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_12 = pma_checker__superpage_hits_T_11 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__superpage_hits_T_15 = pma_checker__superpage_hits_T_14[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_16 = pma_checker__superpage_hits_T_15 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_17 = pma_checker__superpage_hits_T_16; // @[TLB.scala:183:{40,79}]
wire pma_checker_superpage_hits_ignore_4 = pma_checker__superpage_hits_ignore_T_4; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__superpage_hits_T_20 = pma_checker__superpage_hits_T_19[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_21 = pma_checker__superpage_hits_T_20 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_22 = pma_checker_superpage_hits_ignore_4 | pma_checker__superpage_hits_T_21; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__superpage_hits_T_25 = pma_checker__superpage_hits_T_24[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_26 = pma_checker__superpage_hits_T_25 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__superpage_hits_T_29 = pma_checker__superpage_hits_T_28[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_30 = pma_checker__superpage_hits_T_29 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_31 = pma_checker__superpage_hits_T_30; // @[TLB.scala:183:{40,79}]
wire pma_checker_superpage_hits_ignore_7 = pma_checker__superpage_hits_ignore_T_7; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__superpage_hits_T_34 = pma_checker__superpage_hits_T_33[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_35 = pma_checker__superpage_hits_T_34 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_36 = pma_checker_superpage_hits_ignore_7 | pma_checker__superpage_hits_T_35; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__superpage_hits_T_39 = pma_checker__superpage_hits_T_38[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_40 = pma_checker__superpage_hits_T_39 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__superpage_hits_T_43 = pma_checker__superpage_hits_T_42[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_44 = pma_checker__superpage_hits_T_43 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_45 = pma_checker__superpage_hits_T_44; // @[TLB.scala:183:{40,79}]
wire pma_checker_superpage_hits_ignore_10 = pma_checker__superpage_hits_ignore_T_10; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__superpage_hits_T_48 = pma_checker__superpage_hits_T_47[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_49 = pma_checker__superpage_hits_T_48 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__superpage_hits_T_50 = pma_checker_superpage_hits_ignore_10 | pma_checker__superpage_hits_T_49; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__superpage_hits_T_53 = pma_checker__superpage_hits_T_52[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__superpage_hits_T_54 = pma_checker__superpage_hits_T_53 == 9'h0; // @[TLB.scala:183:{58,79}]
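  // vpn[1:0] selects the sub-sector (way index) within each sectored
  // entry, both for the hit vector and for reading back the entry data.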
wire [1:0] pma_checker_hitsVec_idx = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_1 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_2 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_3 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_4 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_5 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_6 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker_hitsVec_idx_7 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_24 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_48 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_72 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_96 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_120 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_144 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [1:0] pma_checker__entries_T_168 = pma_checker_vpn[1:0]; // @[package.scala:163:13]
wire [24:0] pma_checker__hitsVec_T_1 = pma_checker__hitsVec_T[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_2 = pma_checker__hitsVec_T_1 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_4 = pma_checker__hitsVec_T_2 & pma_checker__hitsVec_T_3; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_7 = pma_checker__hitsVec_T_6[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_8 = pma_checker__hitsVec_T_7 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_10 = pma_checker__hitsVec_T_8 & pma_checker__hitsVec_T_9; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_13 = pma_checker__hitsVec_T_12[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_14 = pma_checker__hitsVec_T_13 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_16 = pma_checker__hitsVec_T_14 & pma_checker__hitsVec_T_15; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_19 = pma_checker__hitsVec_T_18[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_20 = pma_checker__hitsVec_T_19 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_22 = pma_checker__hitsVec_T_20 & pma_checker__hitsVec_T_21; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_25 = pma_checker__hitsVec_T_24[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_26 = pma_checker__hitsVec_T_25 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_28 = pma_checker__hitsVec_T_26 & pma_checker__hitsVec_T_27; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_31 = pma_checker__hitsVec_T_30[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_32 = pma_checker__hitsVec_T_31 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_34 = pma_checker__hitsVec_T_32 & pma_checker__hitsVec_T_33; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_37 = pma_checker__hitsVec_T_36[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_38 = pma_checker__hitsVec_T_37 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_40 = pma_checker__hitsVec_T_38 & pma_checker__hitsVec_T_39; // @[TLB.scala:174:{86,95,105}]
wire [24:0] pma_checker__hitsVec_T_43 = pma_checker__hitsVec_T_42[26:2]; // @[TLB.scala:174:{61,68}]
wire pma_checker__hitsVec_T_44 = pma_checker__hitsVec_T_43 == 25'h0; // @[TLB.scala:174:{68,86}]
wire pma_checker__hitsVec_T_46 = pma_checker__hitsVec_T_44 & pma_checker__hitsVec_T_45; // @[TLB.scala:174:{86,95,105}]
wire [8:0] pma_checker__hitsVec_T_49 = pma_checker__hitsVec_T_48[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_50 = pma_checker__hitsVec_T_49 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_51 = pma_checker__hitsVec_T_50; // @[TLB.scala:183:{40,79}]
wire pma_checker_hitsVec_ignore_1 = pma_checker__hitsVec_ignore_T_1; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__hitsVec_T_54 = pma_checker__hitsVec_T_53[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_55 = pma_checker__hitsVec_T_54 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_56 = pma_checker_hitsVec_ignore_1 | pma_checker__hitsVec_T_55; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__hitsVec_T_59 = pma_checker__hitsVec_T_58[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_60 = pma_checker__hitsVec_T_59 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__hitsVec_T_64 = pma_checker__hitsVec_T_63[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_65 = pma_checker__hitsVec_T_64 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_66 = pma_checker__hitsVec_T_65; // @[TLB.scala:183:{40,79}]
wire pma_checker_hitsVec_ignore_4 = pma_checker__hitsVec_ignore_T_4; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__hitsVec_T_69 = pma_checker__hitsVec_T_68[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_70 = pma_checker__hitsVec_T_69 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_71 = pma_checker_hitsVec_ignore_4 | pma_checker__hitsVec_T_70; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__hitsVec_T_74 = pma_checker__hitsVec_T_73[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_75 = pma_checker__hitsVec_T_74 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__hitsVec_T_79 = pma_checker__hitsVec_T_78[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_80 = pma_checker__hitsVec_T_79 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_81 = pma_checker__hitsVec_T_80; // @[TLB.scala:183:{40,79}]
wire pma_checker_hitsVec_ignore_7 = pma_checker__hitsVec_ignore_T_7; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__hitsVec_T_84 = pma_checker__hitsVec_T_83[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_85 = pma_checker__hitsVec_T_84 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_86 = pma_checker_hitsVec_ignore_7 | pma_checker__hitsVec_T_85; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__hitsVec_T_89 = pma_checker__hitsVec_T_88[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_90 = pma_checker__hitsVec_T_89 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__hitsVec_T_94 = pma_checker__hitsVec_T_93[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_95 = pma_checker__hitsVec_T_94 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_96 = pma_checker__hitsVec_T_95; // @[TLB.scala:183:{40,79}]
wire pma_checker_hitsVec_ignore_10 = pma_checker__hitsVec_ignore_T_10; // @[TLB.scala:182:{28,34}]
wire [8:0] pma_checker__hitsVec_T_99 = pma_checker__hitsVec_T_98[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_100 = pma_checker__hitsVec_T_99 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_101 = pma_checker_hitsVec_ignore_10 | pma_checker__hitsVec_T_100; // @[TLB.scala:182:34, :183:{40,79}]
wire [8:0] pma_checker__hitsVec_T_104 = pma_checker__hitsVec_T_103[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_105 = pma_checker__hitsVec_T_104 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__hitsVec_T_109 = pma_checker__hitsVec_T_108[26:18]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_110 = pma_checker__hitsVec_T_109 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker__hitsVec_T_111 = pma_checker__hitsVec_T_110; // @[TLB.scala:183:{40,79}]
wire [8:0] pma_checker__hitsVec_T_114 = pma_checker__hitsVec_T_113[17:9]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_115 = pma_checker__hitsVec_T_114 == 9'h0; // @[TLB.scala:183:{58,79}]
wire [8:0] pma_checker__hitsVec_T_119 = pma_checker__hitsVec_T_118[8:0]; // @[TLB.scala:183:{52,58}]
wire pma_checker__hitsVec_T_120 = pma_checker__hitsVec_T_119 == 9'h0; // @[TLB.scala:183:{58,79}]
wire pma_checker_newEntry_ppp; // @[TLB.scala:449:24]
wire pma_checker_newEntry_pal; // @[TLB.scala:449:24]
wire pma_checker_newEntry_paa; // @[TLB.scala:449:24]
wire pma_checker_newEntry_eff; // @[TLB.scala:449:24]
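  // Entry-data packing: the concatenations below assemble the newEntry
  // attribute/permission bits (c, eff, paa, pal, ppp, pr, px, pw) into the
  // low bits of a 42-bit entry-data word, with the remaining fields zeroed;
  // common sub-terms are hoisted into _GEN_3.._GEN_6 and shared across the
  // special, superpage, and sectored entries.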
wire [1:0] _GEN_3 = {pma_checker_newEntry_c, 1'h0}; // @[TLB.scala:217:24, :449:24]
wire [1:0] pma_checker_special_entry_data_0_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_special_entry_data_0_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_0_data_0_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_0_data_0_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_1_data_0_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_2_data_0_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_3_data_0_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_0_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_0_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_1_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_1_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_2_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_2_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_3_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_3_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_4_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_4_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_5_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_5_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_6_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_6_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_7_data_lo_lo_lo; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_7_data_lo_lo_lo = _GEN_3; // @[TLB.scala:217:24]
wire [1:0] _GEN_4 = {pma_checker_newEntry_pal, pma_checker_newEntry_paa}; // @[TLB.scala:217:24, :449:24]
wire [1:0] pma_checker_special_entry_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_special_entry_data_0_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_0_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_0_data_0_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_1_data_0_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_2_data_0_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_3_data_0_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_0_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_0_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_1_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_1_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_2_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_2_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_3_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_3_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_4_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_4_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_5_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_5_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_6_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_6_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_7_data_lo_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_7_data_lo_lo_hi_hi = _GEN_4; // @[TLB.scala:217:24]
wire [2:0] pma_checker_special_entry_data_0_lo_lo_hi = {pma_checker_special_entry_data_0_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_special_entry_data_0_lo_lo = {pma_checker_special_entry_data_0_lo_lo_hi, pma_checker_special_entry_data_0_lo_lo_lo}; // @[TLB.scala:217:24]
wire [1:0] _GEN_5 = {pma_checker_newEntry_px, pma_checker_newEntry_pr}; // @[TLB.scala:217:24, :449:24]
wire [1:0] pma_checker_special_entry_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_special_entry_data_0_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_0_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_0_data_0_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_1_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_1_data_0_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_2_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_2_data_0_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_superpage_entries_3_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_3_data_0_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_0_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_0_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_1_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_1_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_2_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_2_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_3_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_3_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_4_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_4_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_5_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_5_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_6_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_6_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [1:0] pma_checker_sectored_entries_0_7_data_lo_hi_lo_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_7_data_lo_hi_lo_hi = _GEN_5; // @[TLB.scala:217:24]
wire [2:0] pma_checker_special_entry_data_0_lo_hi_lo = {pma_checker_special_entry_data_0_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [2:0] _GEN_6 = {2'h0, pma_checker_newEntry_pw}; // @[TLB.scala:217:24, :449:24]
wire [2:0] pma_checker_special_entry_data_0_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_special_entry_data_0_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_0_data_0_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_0_data_0_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_1_data_0_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_1_data_0_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_2_data_0_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_2_data_0_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_3_data_0_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_superpage_entries_3_data_0_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_0_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_0_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_1_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_1_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_2_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_2_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_3_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_3_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_4_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_4_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_5_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_5_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_6_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_6_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_7_data_lo_hi_hi; // @[TLB.scala:217:24]
assign pma_checker_sectored_entries_0_7_data_lo_hi_hi = _GEN_6; // @[TLB.scala:217:24]
wire [5:0] pma_checker_special_entry_data_0_lo_hi = {pma_checker_special_entry_data_0_lo_hi_hi, pma_checker_special_entry_data_0_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_special_entry_data_0_lo = {pma_checker_special_entry_data_0_lo_hi, pma_checker_special_entry_data_0_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__special_entry_data_0_T = {31'h0, pma_checker_special_entry_data_0_lo}; // @[TLB.scala:217:24]
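  // The same lo-word construction is repeated for each superpage entry
  // (0..3) and for the sectored ways below.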
wire [2:0] pma_checker_superpage_entries_0_data_0_lo_lo_hi = {pma_checker_superpage_entries_0_data_0_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_superpage_entries_0_data_0_lo_lo = {pma_checker_superpage_entries_0_data_0_lo_lo_hi, pma_checker_superpage_entries_0_data_0_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_0_data_0_lo_hi_lo = {pma_checker_superpage_entries_0_data_0_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_superpage_entries_0_data_0_lo_hi = {pma_checker_superpage_entries_0_data_0_lo_hi_hi, pma_checker_superpage_entries_0_data_0_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_superpage_entries_0_data_0_lo = {pma_checker_superpage_entries_0_data_0_lo_hi, pma_checker_superpage_entries_0_data_0_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__superpage_entries_0_data_0_T = {31'h0, pma_checker_superpage_entries_0_data_0_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_1_data_0_lo_lo_hi = {pma_checker_superpage_entries_1_data_0_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_superpage_entries_1_data_0_lo_lo = {pma_checker_superpage_entries_1_data_0_lo_lo_hi, pma_checker_superpage_entries_1_data_0_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_1_data_0_lo_hi_lo = {pma_checker_superpage_entries_1_data_0_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_superpage_entries_1_data_0_lo_hi = {pma_checker_superpage_entries_1_data_0_lo_hi_hi, pma_checker_superpage_entries_1_data_0_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_superpage_entries_1_data_0_lo = {pma_checker_superpage_entries_1_data_0_lo_hi, pma_checker_superpage_entries_1_data_0_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__superpage_entries_1_data_0_T = {31'h0, pma_checker_superpage_entries_1_data_0_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_2_data_0_lo_lo_hi = {pma_checker_superpage_entries_2_data_0_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_superpage_entries_2_data_0_lo_lo = {pma_checker_superpage_entries_2_data_0_lo_lo_hi, pma_checker_superpage_entries_2_data_0_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_2_data_0_lo_hi_lo = {pma_checker_superpage_entries_2_data_0_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_superpage_entries_2_data_0_lo_hi = {pma_checker_superpage_entries_2_data_0_lo_hi_hi, pma_checker_superpage_entries_2_data_0_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_superpage_entries_2_data_0_lo = {pma_checker_superpage_entries_2_data_0_lo_hi, pma_checker_superpage_entries_2_data_0_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__superpage_entries_2_data_0_T = {31'h0, pma_checker_superpage_entries_2_data_0_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_3_data_0_lo_lo_hi = {pma_checker_superpage_entries_3_data_0_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_superpage_entries_3_data_0_lo_lo = {pma_checker_superpage_entries_3_data_0_lo_lo_hi, pma_checker_superpage_entries_3_data_0_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_superpage_entries_3_data_0_lo_hi_lo = {pma_checker_superpage_entries_3_data_0_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_superpage_entries_3_data_0_lo_hi = {pma_checker_superpage_entries_3_data_0_lo_hi_hi, pma_checker_superpage_entries_3_data_0_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_superpage_entries_3_data_0_lo = {pma_checker_superpage_entries_3_data_0_lo_hi, pma_checker_superpage_entries_3_data_0_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__superpage_entries_3_data_0_T = {31'h0, pma_checker_superpage_entries_3_data_0_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_0_data_lo_lo_hi = {pma_checker_sectored_entries_0_0_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_0_data_lo_lo = {pma_checker_sectored_entries_0_0_data_lo_lo_hi, pma_checker_sectored_entries_0_0_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_0_data_lo_hi_lo = {pma_checker_sectored_entries_0_0_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_0_data_lo_hi = {pma_checker_sectored_entries_0_0_data_lo_hi_hi, pma_checker_sectored_entries_0_0_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_0_data_lo = {pma_checker_sectored_entries_0_0_data_lo_hi, pma_checker_sectored_entries_0_0_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_0_data_T = {31'h0, pma_checker_sectored_entries_0_0_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_1_data_lo_lo_hi = {pma_checker_sectored_entries_0_1_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_1_data_lo_lo = {pma_checker_sectored_entries_0_1_data_lo_lo_hi, pma_checker_sectored_entries_0_1_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_1_data_lo_hi_lo = {pma_checker_sectored_entries_0_1_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_1_data_lo_hi = {pma_checker_sectored_entries_0_1_data_lo_hi_hi, pma_checker_sectored_entries_0_1_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_1_data_lo = {pma_checker_sectored_entries_0_1_data_lo_hi, pma_checker_sectored_entries_0_1_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_1_data_T = {31'h0, pma_checker_sectored_entries_0_1_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_2_data_lo_lo_hi = {pma_checker_sectored_entries_0_2_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_2_data_lo_lo = {pma_checker_sectored_entries_0_2_data_lo_lo_hi, pma_checker_sectored_entries_0_2_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_2_data_lo_hi_lo = {pma_checker_sectored_entries_0_2_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_2_data_lo_hi = {pma_checker_sectored_entries_0_2_data_lo_hi_hi, pma_checker_sectored_entries_0_2_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_2_data_lo = {pma_checker_sectored_entries_0_2_data_lo_hi, pma_checker_sectored_entries_0_2_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_2_data_T = {31'h0, pma_checker_sectored_entries_0_2_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_3_data_lo_lo_hi = {pma_checker_sectored_entries_0_3_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_3_data_lo_lo = {pma_checker_sectored_entries_0_3_data_lo_lo_hi, pma_checker_sectored_entries_0_3_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_3_data_lo_hi_lo = {pma_checker_sectored_entries_0_3_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_3_data_lo_hi = {pma_checker_sectored_entries_0_3_data_lo_hi_hi, pma_checker_sectored_entries_0_3_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_3_data_lo = {pma_checker_sectored_entries_0_3_data_lo_hi, pma_checker_sectored_entries_0_3_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_3_data_T = {31'h0, pma_checker_sectored_entries_0_3_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_4_data_lo_lo_hi = {pma_checker_sectored_entries_0_4_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_4_data_lo_lo = {pma_checker_sectored_entries_0_4_data_lo_lo_hi, pma_checker_sectored_entries_0_4_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_4_data_lo_hi_lo = {pma_checker_sectored_entries_0_4_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_4_data_lo_hi = {pma_checker_sectored_entries_0_4_data_lo_hi_hi, pma_checker_sectored_entries_0_4_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_4_data_lo = {pma_checker_sectored_entries_0_4_data_lo_hi, pma_checker_sectored_entries_0_4_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_4_data_T = {31'h0, pma_checker_sectored_entries_0_4_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_5_data_lo_lo_hi = {pma_checker_sectored_entries_0_5_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_5_data_lo_lo = {pma_checker_sectored_entries_0_5_data_lo_lo_hi, pma_checker_sectored_entries_0_5_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_5_data_lo_hi_lo = {pma_checker_sectored_entries_0_5_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_5_data_lo_hi = {pma_checker_sectored_entries_0_5_data_lo_hi_hi, pma_checker_sectored_entries_0_5_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_5_data_lo = {pma_checker_sectored_entries_0_5_data_lo_hi, pma_checker_sectored_entries_0_5_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_5_data_T = {31'h0, pma_checker_sectored_entries_0_5_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_6_data_lo_lo_hi = {pma_checker_sectored_entries_0_6_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_6_data_lo_lo = {pma_checker_sectored_entries_0_6_data_lo_lo_hi, pma_checker_sectored_entries_0_6_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_6_data_lo_hi_lo = {pma_checker_sectored_entries_0_6_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_6_data_lo_hi = {pma_checker_sectored_entries_0_6_data_lo_hi_hi, pma_checker_sectored_entries_0_6_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_6_data_lo = {pma_checker_sectored_entries_0_6_data_lo_hi, pma_checker_sectored_entries_0_6_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_6_data_T = {31'h0, pma_checker_sectored_entries_0_6_data_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_7_data_lo_lo_hi = {pma_checker_sectored_entries_0_7_data_lo_lo_hi_hi, pma_checker_newEntry_eff}; // @[TLB.scala:217:24, :449:24]
wire [4:0] pma_checker_sectored_entries_0_7_data_lo_lo = {pma_checker_sectored_entries_0_7_data_lo_lo_hi, pma_checker_sectored_entries_0_7_data_lo_lo_lo}; // @[TLB.scala:217:24]
wire [2:0] pma_checker_sectored_entries_0_7_data_lo_hi_lo = {pma_checker_sectored_entries_0_7_data_lo_hi_lo_hi, pma_checker_newEntry_ppp}; // @[TLB.scala:217:24, :449:24]
wire [5:0] pma_checker_sectored_entries_0_7_data_lo_hi = {pma_checker_sectored_entries_0_7_data_lo_hi_hi, pma_checker_sectored_entries_0_7_data_lo_hi_lo}; // @[TLB.scala:217:24]
wire [10:0] pma_checker_sectored_entries_0_7_data_lo = {pma_checker_sectored_entries_0_7_data_lo_hi, pma_checker_sectored_entries_0_7_data_lo_lo}; // @[TLB.scala:217:24]
wire [41:0] pma_checker__sectored_entries_0_7_data_T = {31'h0, pma_checker_sectored_entries_0_7_data_lo}; // @[TLB.scala:217:24]
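// The blocks below unpack each packed 42-bit entry word (pma_checker__entries_WIRE_1,
// _3, _5, ...) back into its named TLB entry fields. Bit layout, per the extracts that
// follow: [0] fragmented_superpage, [1] c, [2] eff, [3] paa, [4] pal, [5] ppp, [6] pr,
// [7] px, [8] pw, [9] hr, [10] hx, [11] hw, [12] sr, [13] sx, [14] sw, [15] gf,
// [16] pf, [17] ae_stage2, [18] ae_final, [19] ae_ptw, [20] g, [21] u, [41:22] ppn.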
wire [19:0] pma_checker__entries_T_23; // @[TLB.scala:170:77]
wire pma_checker__entries_T_22; // @[TLB.scala:170:77]
wire pma_checker__entries_T_21; // @[TLB.scala:170:77]
wire pma_checker__entries_T_20; // @[TLB.scala:170:77]
wire pma_checker__entries_T_19; // @[TLB.scala:170:77]
wire pma_checker__entries_T_18; // @[TLB.scala:170:77]
wire pma_checker__entries_T_17; // @[TLB.scala:170:77]
wire pma_checker__entries_T_16; // @[TLB.scala:170:77]
wire pma_checker__entries_T_15; // @[TLB.scala:170:77]
wire pma_checker__entries_T_14; // @[TLB.scala:170:77]
wire pma_checker__entries_T_13; // @[TLB.scala:170:77]
wire pma_checker__entries_T_12; // @[TLB.scala:170:77]
wire pma_checker__entries_T_11; // @[TLB.scala:170:77]
wire pma_checker__entries_T_10; // @[TLB.scala:170:77]
wire pma_checker__entries_T_9; // @[TLB.scala:170:77]
wire pma_checker__entries_T_8; // @[TLB.scala:170:77]
wire pma_checker__entries_T_7; // @[TLB.scala:170:77]
wire pma_checker__entries_T_6; // @[TLB.scala:170:77]
wire pma_checker__entries_T_5; // @[TLB.scala:170:77]
wire pma_checker__entries_T_4; // @[TLB.scala:170:77]
wire pma_checker__entries_T_3; // @[TLB.scala:170:77]
wire pma_checker__entries_T_2; // @[TLB.scala:170:77]
wire pma_checker__entries_T_1; // @[TLB.scala:170:77]
assign pma_checker__entries_T_1 = pma_checker__entries_WIRE_1[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_fragmented_superpage = pma_checker__entries_T_1; // @[TLB.scala:170:77]
assign pma_checker__entries_T_2 = pma_checker__entries_WIRE_1[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_c = pma_checker__entries_T_2; // @[TLB.scala:170:77]
assign pma_checker__entries_T_3 = pma_checker__entries_WIRE_1[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_eff = pma_checker__entries_T_3; // @[TLB.scala:170:77]
assign pma_checker__entries_T_4 = pma_checker__entries_WIRE_1[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_paa = pma_checker__entries_T_4; // @[TLB.scala:170:77]
assign pma_checker__entries_T_5 = pma_checker__entries_WIRE_1[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_pal = pma_checker__entries_T_5; // @[TLB.scala:170:77]
assign pma_checker__entries_T_6 = pma_checker__entries_WIRE_1[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_ppp = pma_checker__entries_T_6; // @[TLB.scala:170:77]
assign pma_checker__entries_T_7 = pma_checker__entries_WIRE_1[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_pr = pma_checker__entries_T_7; // @[TLB.scala:170:77]
assign pma_checker__entries_T_8 = pma_checker__entries_WIRE_1[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_px = pma_checker__entries_T_8; // @[TLB.scala:170:77]
assign pma_checker__entries_T_9 = pma_checker__entries_WIRE_1[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_pw = pma_checker__entries_T_9; // @[TLB.scala:170:77]
assign pma_checker__entries_T_10 = pma_checker__entries_WIRE_1[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_hr = pma_checker__entries_T_10; // @[TLB.scala:170:77]
assign pma_checker__entries_T_11 = pma_checker__entries_WIRE_1[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_hx = pma_checker__entries_T_11; // @[TLB.scala:170:77]
assign pma_checker__entries_T_12 = pma_checker__entries_WIRE_1[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_hw = pma_checker__entries_T_12; // @[TLB.scala:170:77]
assign pma_checker__entries_T_13 = pma_checker__entries_WIRE_1[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_sr = pma_checker__entries_T_13; // @[TLB.scala:170:77]
assign pma_checker__entries_T_14 = pma_checker__entries_WIRE_1[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_sx = pma_checker__entries_T_14; // @[TLB.scala:170:77]
assign pma_checker__entries_T_15 = pma_checker__entries_WIRE_1[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_sw = pma_checker__entries_T_15; // @[TLB.scala:170:77]
assign pma_checker__entries_T_16 = pma_checker__entries_WIRE_1[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_gf = pma_checker__entries_T_16; // @[TLB.scala:170:77]
assign pma_checker__entries_T_17 = pma_checker__entries_WIRE_1[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_pf = pma_checker__entries_T_17; // @[TLB.scala:170:77]
assign pma_checker__entries_T_18 = pma_checker__entries_WIRE_1[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_ae_stage2 = pma_checker__entries_T_18; // @[TLB.scala:170:77]
assign pma_checker__entries_T_19 = pma_checker__entries_WIRE_1[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_ae_final = pma_checker__entries_T_19; // @[TLB.scala:170:77]
assign pma_checker__entries_T_20 = pma_checker__entries_WIRE_1[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_ae_ptw = pma_checker__entries_T_20; // @[TLB.scala:170:77]
assign pma_checker__entries_T_21 = pma_checker__entries_WIRE_1[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_g = pma_checker__entries_T_21; // @[TLB.scala:170:77]
assign pma_checker__entries_T_22 = pma_checker__entries_WIRE_1[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_u = pma_checker__entries_T_22; // @[TLB.scala:170:77]
assign pma_checker__entries_T_23 = pma_checker__entries_WIRE_1[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_ppn = pma_checker__entries_T_23; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_47; // @[TLB.scala:170:77]
wire pma_checker__entries_T_46; // @[TLB.scala:170:77]
wire pma_checker__entries_T_45; // @[TLB.scala:170:77]
wire pma_checker__entries_T_44; // @[TLB.scala:170:77]
wire pma_checker__entries_T_43; // @[TLB.scala:170:77]
wire pma_checker__entries_T_42; // @[TLB.scala:170:77]
wire pma_checker__entries_T_41; // @[TLB.scala:170:77]
wire pma_checker__entries_T_40; // @[TLB.scala:170:77]
wire pma_checker__entries_T_39; // @[TLB.scala:170:77]
wire pma_checker__entries_T_38; // @[TLB.scala:170:77]
wire pma_checker__entries_T_37; // @[TLB.scala:170:77]
wire pma_checker__entries_T_36; // @[TLB.scala:170:77]
wire pma_checker__entries_T_35; // @[TLB.scala:170:77]
wire pma_checker__entries_T_34; // @[TLB.scala:170:77]
wire pma_checker__entries_T_33; // @[TLB.scala:170:77]
wire pma_checker__entries_T_32; // @[TLB.scala:170:77]
wire pma_checker__entries_T_31; // @[TLB.scala:170:77]
wire pma_checker__entries_T_30; // @[TLB.scala:170:77]
wire pma_checker__entries_T_29; // @[TLB.scala:170:77]
wire pma_checker__entries_T_28; // @[TLB.scala:170:77]
wire pma_checker__entries_T_27; // @[TLB.scala:170:77]
wire pma_checker__entries_T_26; // @[TLB.scala:170:77]
wire pma_checker__entries_T_25; // @[TLB.scala:170:77]
assign pma_checker__entries_T_25 = pma_checker__entries_WIRE_3[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_fragmented_superpage = pma_checker__entries_T_25; // @[TLB.scala:170:77]
assign pma_checker__entries_T_26 = pma_checker__entries_WIRE_3[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_c = pma_checker__entries_T_26; // @[TLB.scala:170:77]
assign pma_checker__entries_T_27 = pma_checker__entries_WIRE_3[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_eff = pma_checker__entries_T_27; // @[TLB.scala:170:77]
assign pma_checker__entries_T_28 = pma_checker__entries_WIRE_3[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_paa = pma_checker__entries_T_28; // @[TLB.scala:170:77]
assign pma_checker__entries_T_29 = pma_checker__entries_WIRE_3[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_pal = pma_checker__entries_T_29; // @[TLB.scala:170:77]
assign pma_checker__entries_T_30 = pma_checker__entries_WIRE_3[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_ppp = pma_checker__entries_T_30; // @[TLB.scala:170:77]
assign pma_checker__entries_T_31 = pma_checker__entries_WIRE_3[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_pr = pma_checker__entries_T_31; // @[TLB.scala:170:77]
assign pma_checker__entries_T_32 = pma_checker__entries_WIRE_3[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_px = pma_checker__entries_T_32; // @[TLB.scala:170:77]
assign pma_checker__entries_T_33 = pma_checker__entries_WIRE_3[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_pw = pma_checker__entries_T_33; // @[TLB.scala:170:77]
assign pma_checker__entries_T_34 = pma_checker__entries_WIRE_3[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_hr = pma_checker__entries_T_34; // @[TLB.scala:170:77]
assign pma_checker__entries_T_35 = pma_checker__entries_WIRE_3[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_hx = pma_checker__entries_T_35; // @[TLB.scala:170:77]
assign pma_checker__entries_T_36 = pma_checker__entries_WIRE_3[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_hw = pma_checker__entries_T_36; // @[TLB.scala:170:77]
assign pma_checker__entries_T_37 = pma_checker__entries_WIRE_3[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_sr = pma_checker__entries_T_37; // @[TLB.scala:170:77]
assign pma_checker__entries_T_38 = pma_checker__entries_WIRE_3[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_sx = pma_checker__entries_T_38; // @[TLB.scala:170:77]
assign pma_checker__entries_T_39 = pma_checker__entries_WIRE_3[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_sw = pma_checker__entries_T_39; // @[TLB.scala:170:77]
assign pma_checker__entries_T_40 = pma_checker__entries_WIRE_3[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_gf = pma_checker__entries_T_40; // @[TLB.scala:170:77]
assign pma_checker__entries_T_41 = pma_checker__entries_WIRE_3[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_pf = pma_checker__entries_T_41; // @[TLB.scala:170:77]
assign pma_checker__entries_T_42 = pma_checker__entries_WIRE_3[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_ae_stage2 = pma_checker__entries_T_42; // @[TLB.scala:170:77]
assign pma_checker__entries_T_43 = pma_checker__entries_WIRE_3[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_ae_final = pma_checker__entries_T_43; // @[TLB.scala:170:77]
assign pma_checker__entries_T_44 = pma_checker__entries_WIRE_3[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_ae_ptw = pma_checker__entries_T_44; // @[TLB.scala:170:77]
assign pma_checker__entries_T_45 = pma_checker__entries_WIRE_3[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_g = pma_checker__entries_T_45; // @[TLB.scala:170:77]
assign pma_checker__entries_T_46 = pma_checker__entries_WIRE_3[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_2_u = pma_checker__entries_T_46; // @[TLB.scala:170:77]
assign pma_checker__entries_T_47 = pma_checker__entries_WIRE_3[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_2_ppn = pma_checker__entries_T_47; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_71; // @[TLB.scala:170:77]
wire pma_checker__entries_T_70; // @[TLB.scala:170:77]
wire pma_checker__entries_T_69; // @[TLB.scala:170:77]
wire pma_checker__entries_T_68; // @[TLB.scala:170:77]
wire pma_checker__entries_T_67; // @[TLB.scala:170:77]
wire pma_checker__entries_T_66; // @[TLB.scala:170:77]
wire pma_checker__entries_T_65; // @[TLB.scala:170:77]
wire pma_checker__entries_T_64; // @[TLB.scala:170:77]
wire pma_checker__entries_T_63; // @[TLB.scala:170:77]
wire pma_checker__entries_T_62; // @[TLB.scala:170:77]
wire pma_checker__entries_T_61; // @[TLB.scala:170:77]
wire pma_checker__entries_T_60; // @[TLB.scala:170:77]
wire pma_checker__entries_T_59; // @[TLB.scala:170:77]
wire pma_checker__entries_T_58; // @[TLB.scala:170:77]
wire pma_checker__entries_T_57; // @[TLB.scala:170:77]
wire pma_checker__entries_T_56; // @[TLB.scala:170:77]
wire pma_checker__entries_T_55; // @[TLB.scala:170:77]
wire pma_checker__entries_T_54; // @[TLB.scala:170:77]
wire pma_checker__entries_T_53; // @[TLB.scala:170:77]
wire pma_checker__entries_T_52; // @[TLB.scala:170:77]
wire pma_checker__entries_T_51; // @[TLB.scala:170:77]
wire pma_checker__entries_T_50; // @[TLB.scala:170:77]
wire pma_checker__entries_T_49; // @[TLB.scala:170:77]
assign pma_checker__entries_T_49 = pma_checker__entries_WIRE_5[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_fragmented_superpage = pma_checker__entries_T_49; // @[TLB.scala:170:77]
assign pma_checker__entries_T_50 = pma_checker__entries_WIRE_5[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_c = pma_checker__entries_T_50; // @[TLB.scala:170:77]
assign pma_checker__entries_T_51 = pma_checker__entries_WIRE_5[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_eff = pma_checker__entries_T_51; // @[TLB.scala:170:77]
assign pma_checker__entries_T_52 = pma_checker__entries_WIRE_5[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_paa = pma_checker__entries_T_52; // @[TLB.scala:170:77]
assign pma_checker__entries_T_53 = pma_checker__entries_WIRE_5[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_pal = pma_checker__entries_T_53; // @[TLB.scala:170:77]
assign pma_checker__entries_T_54 = pma_checker__entries_WIRE_5[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_ppp = pma_checker__entries_T_54; // @[TLB.scala:170:77]
assign pma_checker__entries_T_55 = pma_checker__entries_WIRE_5[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_pr = pma_checker__entries_T_55; // @[TLB.scala:170:77]
assign pma_checker__entries_T_56 = pma_checker__entries_WIRE_5[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_px = pma_checker__entries_T_56; // @[TLB.scala:170:77]
assign pma_checker__entries_T_57 = pma_checker__entries_WIRE_5[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_pw = pma_checker__entries_T_57; // @[TLB.scala:170:77]
assign pma_checker__entries_T_58 = pma_checker__entries_WIRE_5[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_hr = pma_checker__entries_T_58; // @[TLB.scala:170:77]
assign pma_checker__entries_T_59 = pma_checker__entries_WIRE_5[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_hx = pma_checker__entries_T_59; // @[TLB.scala:170:77]
assign pma_checker__entries_T_60 = pma_checker__entries_WIRE_5[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_hw = pma_checker__entries_T_60; // @[TLB.scala:170:77]
assign pma_checker__entries_T_61 = pma_checker__entries_WIRE_5[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_sr = pma_checker__entries_T_61; // @[TLB.scala:170:77]
assign pma_checker__entries_T_62 = pma_checker__entries_WIRE_5[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_sx = pma_checker__entries_T_62; // @[TLB.scala:170:77]
assign pma_checker__entries_T_63 = pma_checker__entries_WIRE_5[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_sw = pma_checker__entries_T_63; // @[TLB.scala:170:77]
assign pma_checker__entries_T_64 = pma_checker__entries_WIRE_5[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_gf = pma_checker__entries_T_64; // @[TLB.scala:170:77]
assign pma_checker__entries_T_65 = pma_checker__entries_WIRE_5[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_pf = pma_checker__entries_T_65; // @[TLB.scala:170:77]
assign pma_checker__entries_T_66 = pma_checker__entries_WIRE_5[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_ae_stage2 = pma_checker__entries_T_66; // @[TLB.scala:170:77]
assign pma_checker__entries_T_67 = pma_checker__entries_WIRE_5[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_ae_final = pma_checker__entries_T_67; // @[TLB.scala:170:77]
assign pma_checker__entries_T_68 = pma_checker__entries_WIRE_5[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_ae_ptw = pma_checker__entries_T_68; // @[TLB.scala:170:77]
assign pma_checker__entries_T_69 = pma_checker__entries_WIRE_5[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_g = pma_checker__entries_T_69; // @[TLB.scala:170:77]
assign pma_checker__entries_T_70 = pma_checker__entries_WIRE_5[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_4_u = pma_checker__entries_T_70; // @[TLB.scala:170:77]
assign pma_checker__entries_T_71 = pma_checker__entries_WIRE_5[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_4_ppn = pma_checker__entries_T_71; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_95; // @[TLB.scala:170:77]
wire pma_checker__entries_T_94; // @[TLB.scala:170:77]
wire pma_checker__entries_T_93; // @[TLB.scala:170:77]
wire pma_checker__entries_T_92; // @[TLB.scala:170:77]
wire pma_checker__entries_T_91; // @[TLB.scala:170:77]
wire pma_checker__entries_T_90; // @[TLB.scala:170:77]
wire pma_checker__entries_T_89; // @[TLB.scala:170:77]
wire pma_checker__entries_T_88; // @[TLB.scala:170:77]
wire pma_checker__entries_T_87; // @[TLB.scala:170:77]
wire pma_checker__entries_T_86; // @[TLB.scala:170:77]
wire pma_checker__entries_T_85; // @[TLB.scala:170:77]
wire pma_checker__entries_T_84; // @[TLB.scala:170:77]
wire pma_checker__entries_T_83; // @[TLB.scala:170:77]
wire pma_checker__entries_T_82; // @[TLB.scala:170:77]
wire pma_checker__entries_T_81; // @[TLB.scala:170:77]
wire pma_checker__entries_T_80; // @[TLB.scala:170:77]
wire pma_checker__entries_T_79; // @[TLB.scala:170:77]
wire pma_checker__entries_T_78; // @[TLB.scala:170:77]
wire pma_checker__entries_T_77; // @[TLB.scala:170:77]
wire pma_checker__entries_T_76; // @[TLB.scala:170:77]
wire pma_checker__entries_T_75; // @[TLB.scala:170:77]
wire pma_checker__entries_T_74; // @[TLB.scala:170:77]
wire pma_checker__entries_T_73; // @[TLB.scala:170:77]
assign pma_checker__entries_T_73 = pma_checker__entries_WIRE_7[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_fragmented_superpage = pma_checker__entries_T_73; // @[TLB.scala:170:77]
assign pma_checker__entries_T_74 = pma_checker__entries_WIRE_7[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_c = pma_checker__entries_T_74; // @[TLB.scala:170:77]
assign pma_checker__entries_T_75 = pma_checker__entries_WIRE_7[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_eff = pma_checker__entries_T_75; // @[TLB.scala:170:77]
assign pma_checker__entries_T_76 = pma_checker__entries_WIRE_7[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_paa = pma_checker__entries_T_76; // @[TLB.scala:170:77]
assign pma_checker__entries_T_77 = pma_checker__entries_WIRE_7[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_pal = pma_checker__entries_T_77; // @[TLB.scala:170:77]
assign pma_checker__entries_T_78 = pma_checker__entries_WIRE_7[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_ppp = pma_checker__entries_T_78; // @[TLB.scala:170:77]
assign pma_checker__entries_T_79 = pma_checker__entries_WIRE_7[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_pr = pma_checker__entries_T_79; // @[TLB.scala:170:77]
assign pma_checker__entries_T_80 = pma_checker__entries_WIRE_7[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_px = pma_checker__entries_T_80; // @[TLB.scala:170:77]
assign pma_checker__entries_T_81 = pma_checker__entries_WIRE_7[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_pw = pma_checker__entries_T_81; // @[TLB.scala:170:77]
assign pma_checker__entries_T_82 = pma_checker__entries_WIRE_7[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_hr = pma_checker__entries_T_82; // @[TLB.scala:170:77]
assign pma_checker__entries_T_83 = pma_checker__entries_WIRE_7[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_hx = pma_checker__entries_T_83; // @[TLB.scala:170:77]
assign pma_checker__entries_T_84 = pma_checker__entries_WIRE_7[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_hw = pma_checker__entries_T_84; // @[TLB.scala:170:77]
assign pma_checker__entries_T_85 = pma_checker__entries_WIRE_7[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_sr = pma_checker__entries_T_85; // @[TLB.scala:170:77]
assign pma_checker__entries_T_86 = pma_checker__entries_WIRE_7[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_sx = pma_checker__entries_T_86; // @[TLB.scala:170:77]
assign pma_checker__entries_T_87 = pma_checker__entries_WIRE_7[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_sw = pma_checker__entries_T_87; // @[TLB.scala:170:77]
assign pma_checker__entries_T_88 = pma_checker__entries_WIRE_7[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_gf = pma_checker__entries_T_88; // @[TLB.scala:170:77]
assign pma_checker__entries_T_89 = pma_checker__entries_WIRE_7[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_pf = pma_checker__entries_T_89; // @[TLB.scala:170:77]
assign pma_checker__entries_T_90 = pma_checker__entries_WIRE_7[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_ae_stage2 = pma_checker__entries_T_90; // @[TLB.scala:170:77]
assign pma_checker__entries_T_91 = pma_checker__entries_WIRE_7[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_ae_final = pma_checker__entries_T_91; // @[TLB.scala:170:77]
assign pma_checker__entries_T_92 = pma_checker__entries_WIRE_7[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_ae_ptw = pma_checker__entries_T_92; // @[TLB.scala:170:77]
assign pma_checker__entries_T_93 = pma_checker__entries_WIRE_7[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_g = pma_checker__entries_T_93; // @[TLB.scala:170:77]
assign pma_checker__entries_T_94 = pma_checker__entries_WIRE_7[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_6_u = pma_checker__entries_T_94; // @[TLB.scala:170:77]
assign pma_checker__entries_T_95 = pma_checker__entries_WIRE_7[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_6_ppn = pma_checker__entries_T_95; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_119; // @[TLB.scala:170:77]
wire pma_checker__entries_T_118; // @[TLB.scala:170:77]
wire pma_checker__entries_T_117; // @[TLB.scala:170:77]
wire pma_checker__entries_T_116; // @[TLB.scala:170:77]
wire pma_checker__entries_T_115; // @[TLB.scala:170:77]
wire pma_checker__entries_T_114; // @[TLB.scala:170:77]
wire pma_checker__entries_T_113; // @[TLB.scala:170:77]
wire pma_checker__entries_T_112; // @[TLB.scala:170:77]
wire pma_checker__entries_T_111; // @[TLB.scala:170:77]
wire pma_checker__entries_T_110; // @[TLB.scala:170:77]
wire pma_checker__entries_T_109; // @[TLB.scala:170:77]
wire pma_checker__entries_T_108; // @[TLB.scala:170:77]
wire pma_checker__entries_T_107; // @[TLB.scala:170:77]
wire pma_checker__entries_T_106; // @[TLB.scala:170:77]
wire pma_checker__entries_T_105; // @[TLB.scala:170:77]
wire pma_checker__entries_T_104; // @[TLB.scala:170:77]
wire pma_checker__entries_T_103; // @[TLB.scala:170:77]
wire pma_checker__entries_T_102; // @[TLB.scala:170:77]
wire pma_checker__entries_T_101; // @[TLB.scala:170:77]
wire pma_checker__entries_T_100; // @[TLB.scala:170:77]
wire pma_checker__entries_T_99; // @[TLB.scala:170:77]
wire pma_checker__entries_T_98; // @[TLB.scala:170:77]
wire pma_checker__entries_T_97; // @[TLB.scala:170:77]
assign pma_checker__entries_T_97 = pma_checker__entries_WIRE_9[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_fragmented_superpage = pma_checker__entries_T_97; // @[TLB.scala:170:77]
assign pma_checker__entries_T_98 = pma_checker__entries_WIRE_9[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_c = pma_checker__entries_T_98; // @[TLB.scala:170:77]
assign pma_checker__entries_T_99 = pma_checker__entries_WIRE_9[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_eff = pma_checker__entries_T_99; // @[TLB.scala:170:77]
assign pma_checker__entries_T_100 = pma_checker__entries_WIRE_9[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_paa = pma_checker__entries_T_100; // @[TLB.scala:170:77]
assign pma_checker__entries_T_101 = pma_checker__entries_WIRE_9[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_pal = pma_checker__entries_T_101; // @[TLB.scala:170:77]
assign pma_checker__entries_T_102 = pma_checker__entries_WIRE_9[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_ppp = pma_checker__entries_T_102; // @[TLB.scala:170:77]
assign pma_checker__entries_T_103 = pma_checker__entries_WIRE_9[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_pr = pma_checker__entries_T_103; // @[TLB.scala:170:77]
assign pma_checker__entries_T_104 = pma_checker__entries_WIRE_9[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_px = pma_checker__entries_T_104; // @[TLB.scala:170:77]
assign pma_checker__entries_T_105 = pma_checker__entries_WIRE_9[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_pw = pma_checker__entries_T_105; // @[TLB.scala:170:77]
assign pma_checker__entries_T_106 = pma_checker__entries_WIRE_9[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_hr = pma_checker__entries_T_106; // @[TLB.scala:170:77]
assign pma_checker__entries_T_107 = pma_checker__entries_WIRE_9[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_hx = pma_checker__entries_T_107; // @[TLB.scala:170:77]
assign pma_checker__entries_T_108 = pma_checker__entries_WIRE_9[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_hw = pma_checker__entries_T_108; // @[TLB.scala:170:77]
assign pma_checker__entries_T_109 = pma_checker__entries_WIRE_9[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_sr = pma_checker__entries_T_109; // @[TLB.scala:170:77]
assign pma_checker__entries_T_110 = pma_checker__entries_WIRE_9[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_sx = pma_checker__entries_T_110; // @[TLB.scala:170:77]
assign pma_checker__entries_T_111 = pma_checker__entries_WIRE_9[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_sw = pma_checker__entries_T_111; // @[TLB.scala:170:77]
assign pma_checker__entries_T_112 = pma_checker__entries_WIRE_9[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_gf = pma_checker__entries_T_112; // @[TLB.scala:170:77]
assign pma_checker__entries_T_113 = pma_checker__entries_WIRE_9[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_pf = pma_checker__entries_T_113; // @[TLB.scala:170:77]
assign pma_checker__entries_T_114 = pma_checker__entries_WIRE_9[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_ae_stage2 = pma_checker__entries_T_114; // @[TLB.scala:170:77]
assign pma_checker__entries_T_115 = pma_checker__entries_WIRE_9[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_ae_final = pma_checker__entries_T_115; // @[TLB.scala:170:77]
assign pma_checker__entries_T_116 = pma_checker__entries_WIRE_9[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_ae_ptw = pma_checker__entries_T_116; // @[TLB.scala:170:77]
assign pma_checker__entries_T_117 = pma_checker__entries_WIRE_9[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_g = pma_checker__entries_T_117; // @[TLB.scala:170:77]
assign pma_checker__entries_T_118 = pma_checker__entries_WIRE_9[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_8_u = pma_checker__entries_T_118; // @[TLB.scala:170:77]
assign pma_checker__entries_T_119 = pma_checker__entries_WIRE_9[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_8_ppn = pma_checker__entries_T_119; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_143; // @[TLB.scala:170:77]
wire pma_checker__entries_T_142; // @[TLB.scala:170:77]
wire pma_checker__entries_T_141; // @[TLB.scala:170:77]
wire pma_checker__entries_T_140; // @[TLB.scala:170:77]
wire pma_checker__entries_T_139; // @[TLB.scala:170:77]
wire pma_checker__entries_T_138; // @[TLB.scala:170:77]
wire pma_checker__entries_T_137; // @[TLB.scala:170:77]
wire pma_checker__entries_T_136; // @[TLB.scala:170:77]
wire pma_checker__entries_T_135; // @[TLB.scala:170:77]
wire pma_checker__entries_T_134; // @[TLB.scala:170:77]
wire pma_checker__entries_T_133; // @[TLB.scala:170:77]
wire pma_checker__entries_T_132; // @[TLB.scala:170:77]
wire pma_checker__entries_T_131; // @[TLB.scala:170:77]
wire pma_checker__entries_T_130; // @[TLB.scala:170:77]
wire pma_checker__entries_T_129; // @[TLB.scala:170:77]
wire pma_checker__entries_T_128; // @[TLB.scala:170:77]
wire pma_checker__entries_T_127; // @[TLB.scala:170:77]
wire pma_checker__entries_T_126; // @[TLB.scala:170:77]
wire pma_checker__entries_T_125; // @[TLB.scala:170:77]
wire pma_checker__entries_T_124; // @[TLB.scala:170:77]
wire pma_checker__entries_T_123; // @[TLB.scala:170:77]
wire pma_checker__entries_T_122; // @[TLB.scala:170:77]
wire pma_checker__entries_T_121; // @[TLB.scala:170:77]
assign pma_checker__entries_T_121 = pma_checker__entries_WIRE_11[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_fragmented_superpage = pma_checker__entries_T_121; // @[TLB.scala:170:77]
assign pma_checker__entries_T_122 = pma_checker__entries_WIRE_11[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_c = pma_checker__entries_T_122; // @[TLB.scala:170:77]
assign pma_checker__entries_T_123 = pma_checker__entries_WIRE_11[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_eff = pma_checker__entries_T_123; // @[TLB.scala:170:77]
assign pma_checker__entries_T_124 = pma_checker__entries_WIRE_11[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_paa = pma_checker__entries_T_124; // @[TLB.scala:170:77]
assign pma_checker__entries_T_125 = pma_checker__entries_WIRE_11[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_pal = pma_checker__entries_T_125; // @[TLB.scala:170:77]
assign pma_checker__entries_T_126 = pma_checker__entries_WIRE_11[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_ppp = pma_checker__entries_T_126; // @[TLB.scala:170:77]
assign pma_checker__entries_T_127 = pma_checker__entries_WIRE_11[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_pr = pma_checker__entries_T_127; // @[TLB.scala:170:77]
assign pma_checker__entries_T_128 = pma_checker__entries_WIRE_11[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_px = pma_checker__entries_T_128; // @[TLB.scala:170:77]
assign pma_checker__entries_T_129 = pma_checker__entries_WIRE_11[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_pw = pma_checker__entries_T_129; // @[TLB.scala:170:77]
assign pma_checker__entries_T_130 = pma_checker__entries_WIRE_11[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_hr = pma_checker__entries_T_130; // @[TLB.scala:170:77]
assign pma_checker__entries_T_131 = pma_checker__entries_WIRE_11[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_hx = pma_checker__entries_T_131; // @[TLB.scala:170:77]
assign pma_checker__entries_T_132 = pma_checker__entries_WIRE_11[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_hw = pma_checker__entries_T_132; // @[TLB.scala:170:77]
assign pma_checker__entries_T_133 = pma_checker__entries_WIRE_11[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_sr = pma_checker__entries_T_133; // @[TLB.scala:170:77]
assign pma_checker__entries_T_134 = pma_checker__entries_WIRE_11[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_sx = pma_checker__entries_T_134; // @[TLB.scala:170:77]
assign pma_checker__entries_T_135 = pma_checker__entries_WIRE_11[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_sw = pma_checker__entries_T_135; // @[TLB.scala:170:77]
assign pma_checker__entries_T_136 = pma_checker__entries_WIRE_11[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_gf = pma_checker__entries_T_136; // @[TLB.scala:170:77]
assign pma_checker__entries_T_137 = pma_checker__entries_WIRE_11[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_pf = pma_checker__entries_T_137; // @[TLB.scala:170:77]
assign pma_checker__entries_T_138 = pma_checker__entries_WIRE_11[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_ae_stage2 = pma_checker__entries_T_138; // @[TLB.scala:170:77]
assign pma_checker__entries_T_139 = pma_checker__entries_WIRE_11[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_ae_final = pma_checker__entries_T_139; // @[TLB.scala:170:77]
assign pma_checker__entries_T_140 = pma_checker__entries_WIRE_11[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_ae_ptw = pma_checker__entries_T_140; // @[TLB.scala:170:77]
assign pma_checker__entries_T_141 = pma_checker__entries_WIRE_11[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_g = pma_checker__entries_T_141; // @[TLB.scala:170:77]
assign pma_checker__entries_T_142 = pma_checker__entries_WIRE_11[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_10_u = pma_checker__entries_T_142; // @[TLB.scala:170:77]
assign pma_checker__entries_T_143 = pma_checker__entries_WIRE_11[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_10_ppn = pma_checker__entries_T_143; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_167; // @[TLB.scala:170:77]
wire pma_checker__entries_T_166; // @[TLB.scala:170:77]
wire pma_checker__entries_T_165; // @[TLB.scala:170:77]
wire pma_checker__entries_T_164; // @[TLB.scala:170:77]
wire pma_checker__entries_T_163; // @[TLB.scala:170:77]
wire pma_checker__entries_T_162; // @[TLB.scala:170:77]
wire pma_checker__entries_T_161; // @[TLB.scala:170:77]
wire pma_checker__entries_T_160; // @[TLB.scala:170:77]
wire pma_checker__entries_T_159; // @[TLB.scala:170:77]
wire pma_checker__entries_T_158; // @[TLB.scala:170:77]
wire pma_checker__entries_T_157; // @[TLB.scala:170:77]
wire pma_checker__entries_T_156; // @[TLB.scala:170:77]
wire pma_checker__entries_T_155; // @[TLB.scala:170:77]
wire pma_checker__entries_T_154; // @[TLB.scala:170:77]
wire pma_checker__entries_T_153; // @[TLB.scala:170:77]
wire pma_checker__entries_T_152; // @[TLB.scala:170:77]
wire pma_checker__entries_T_151; // @[TLB.scala:170:77]
wire pma_checker__entries_T_150; // @[TLB.scala:170:77]
wire pma_checker__entries_T_149; // @[TLB.scala:170:77]
wire pma_checker__entries_T_148; // @[TLB.scala:170:77]
wire pma_checker__entries_T_147; // @[TLB.scala:170:77]
wire pma_checker__entries_T_146; // @[TLB.scala:170:77]
wire pma_checker__entries_T_145; // @[TLB.scala:170:77]
assign pma_checker__entries_T_145 = pma_checker__entries_WIRE_13[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_fragmented_superpage = pma_checker__entries_T_145; // @[TLB.scala:170:77]
assign pma_checker__entries_T_146 = pma_checker__entries_WIRE_13[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_c = pma_checker__entries_T_146; // @[TLB.scala:170:77]
assign pma_checker__entries_T_147 = pma_checker__entries_WIRE_13[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_eff = pma_checker__entries_T_147; // @[TLB.scala:170:77]
assign pma_checker__entries_T_148 = pma_checker__entries_WIRE_13[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_paa = pma_checker__entries_T_148; // @[TLB.scala:170:77]
assign pma_checker__entries_T_149 = pma_checker__entries_WIRE_13[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_pal = pma_checker__entries_T_149; // @[TLB.scala:170:77]
assign pma_checker__entries_T_150 = pma_checker__entries_WIRE_13[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_ppp = pma_checker__entries_T_150; // @[TLB.scala:170:77]
assign pma_checker__entries_T_151 = pma_checker__entries_WIRE_13[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_pr = pma_checker__entries_T_151; // @[TLB.scala:170:77]
assign pma_checker__entries_T_152 = pma_checker__entries_WIRE_13[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_px = pma_checker__entries_T_152; // @[TLB.scala:170:77]
assign pma_checker__entries_T_153 = pma_checker__entries_WIRE_13[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_pw = pma_checker__entries_T_153; // @[TLB.scala:170:77]
assign pma_checker__entries_T_154 = pma_checker__entries_WIRE_13[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_hr = pma_checker__entries_T_154; // @[TLB.scala:170:77]
assign pma_checker__entries_T_155 = pma_checker__entries_WIRE_13[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_hx = pma_checker__entries_T_155; // @[TLB.scala:170:77]
assign pma_checker__entries_T_156 = pma_checker__entries_WIRE_13[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_hw = pma_checker__entries_T_156; // @[TLB.scala:170:77]
assign pma_checker__entries_T_157 = pma_checker__entries_WIRE_13[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_sr = pma_checker__entries_T_157; // @[TLB.scala:170:77]
assign pma_checker__entries_T_158 = pma_checker__entries_WIRE_13[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_sx = pma_checker__entries_T_158; // @[TLB.scala:170:77]
assign pma_checker__entries_T_159 = pma_checker__entries_WIRE_13[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_sw = pma_checker__entries_T_159; // @[TLB.scala:170:77]
assign pma_checker__entries_T_160 = pma_checker__entries_WIRE_13[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_gf = pma_checker__entries_T_160; // @[TLB.scala:170:77]
assign pma_checker__entries_T_161 = pma_checker__entries_WIRE_13[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_pf = pma_checker__entries_T_161; // @[TLB.scala:170:77]
assign pma_checker__entries_T_162 = pma_checker__entries_WIRE_13[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_ae_stage2 = pma_checker__entries_T_162; // @[TLB.scala:170:77]
assign pma_checker__entries_T_163 = pma_checker__entries_WIRE_13[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_ae_final = pma_checker__entries_T_163; // @[TLB.scala:170:77]
assign pma_checker__entries_T_164 = pma_checker__entries_WIRE_13[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_ae_ptw = pma_checker__entries_T_164; // @[TLB.scala:170:77]
assign pma_checker__entries_T_165 = pma_checker__entries_WIRE_13[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_g = pma_checker__entries_T_165; // @[TLB.scala:170:77]
assign pma_checker__entries_T_166 = pma_checker__entries_WIRE_13[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_12_u = pma_checker__entries_T_166; // @[TLB.scala:170:77]
assign pma_checker__entries_T_167 = pma_checker__entries_WIRE_13[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_12_ppn = pma_checker__entries_T_167; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_191; // @[TLB.scala:170:77]
wire pma_checker__entries_T_190; // @[TLB.scala:170:77]
wire pma_checker__entries_T_189; // @[TLB.scala:170:77]
wire pma_checker__entries_T_188; // @[TLB.scala:170:77]
wire pma_checker__entries_T_187; // @[TLB.scala:170:77]
wire pma_checker__entries_T_186; // @[TLB.scala:170:77]
wire pma_checker__entries_T_185; // @[TLB.scala:170:77]
wire pma_checker__entries_T_184; // @[TLB.scala:170:77]
wire pma_checker__entries_T_183; // @[TLB.scala:170:77]
wire pma_checker__entries_T_182; // @[TLB.scala:170:77]
wire pma_checker__entries_T_181; // @[TLB.scala:170:77]
wire pma_checker__entries_T_180; // @[TLB.scala:170:77]
wire pma_checker__entries_T_179; // @[TLB.scala:170:77]
wire pma_checker__entries_T_178; // @[TLB.scala:170:77]
wire pma_checker__entries_T_177; // @[TLB.scala:170:77]
wire pma_checker__entries_T_176; // @[TLB.scala:170:77]
wire pma_checker__entries_T_175; // @[TLB.scala:170:77]
wire pma_checker__entries_T_174; // @[TLB.scala:170:77]
wire pma_checker__entries_T_173; // @[TLB.scala:170:77]
wire pma_checker__entries_T_172; // @[TLB.scala:170:77]
wire pma_checker__entries_T_171; // @[TLB.scala:170:77]
wire pma_checker__entries_T_170; // @[TLB.scala:170:77]
wire pma_checker__entries_T_169; // @[TLB.scala:170:77]
assign pma_checker__entries_T_169 = pma_checker__entries_WIRE_15[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_fragmented_superpage = pma_checker__entries_T_169; // @[TLB.scala:170:77]
assign pma_checker__entries_T_170 = pma_checker__entries_WIRE_15[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_c = pma_checker__entries_T_170; // @[TLB.scala:170:77]
assign pma_checker__entries_T_171 = pma_checker__entries_WIRE_15[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_eff = pma_checker__entries_T_171; // @[TLB.scala:170:77]
assign pma_checker__entries_T_172 = pma_checker__entries_WIRE_15[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_paa = pma_checker__entries_T_172; // @[TLB.scala:170:77]
assign pma_checker__entries_T_173 = pma_checker__entries_WIRE_15[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_pal = pma_checker__entries_T_173; // @[TLB.scala:170:77]
assign pma_checker__entries_T_174 = pma_checker__entries_WIRE_15[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_ppp = pma_checker__entries_T_174; // @[TLB.scala:170:77]
assign pma_checker__entries_T_175 = pma_checker__entries_WIRE_15[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_pr = pma_checker__entries_T_175; // @[TLB.scala:170:77]
assign pma_checker__entries_T_176 = pma_checker__entries_WIRE_15[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_px = pma_checker__entries_T_176; // @[TLB.scala:170:77]
assign pma_checker__entries_T_177 = pma_checker__entries_WIRE_15[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_pw = pma_checker__entries_T_177; // @[TLB.scala:170:77]
assign pma_checker__entries_T_178 = pma_checker__entries_WIRE_15[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_hr = pma_checker__entries_T_178; // @[TLB.scala:170:77]
assign pma_checker__entries_T_179 = pma_checker__entries_WIRE_15[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_hx = pma_checker__entries_T_179; // @[TLB.scala:170:77]
assign pma_checker__entries_T_180 = pma_checker__entries_WIRE_15[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_hw = pma_checker__entries_T_180; // @[TLB.scala:170:77]
assign pma_checker__entries_T_181 = pma_checker__entries_WIRE_15[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_sr = pma_checker__entries_T_181; // @[TLB.scala:170:77]
assign pma_checker__entries_T_182 = pma_checker__entries_WIRE_15[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_sx = pma_checker__entries_T_182; // @[TLB.scala:170:77]
assign pma_checker__entries_T_183 = pma_checker__entries_WIRE_15[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_sw = pma_checker__entries_T_183; // @[TLB.scala:170:77]
assign pma_checker__entries_T_184 = pma_checker__entries_WIRE_15[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_gf = pma_checker__entries_T_184; // @[TLB.scala:170:77]
assign pma_checker__entries_T_185 = pma_checker__entries_WIRE_15[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_pf = pma_checker__entries_T_185; // @[TLB.scala:170:77]
assign pma_checker__entries_T_186 = pma_checker__entries_WIRE_15[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_ae_stage2 = pma_checker__entries_T_186; // @[TLB.scala:170:77]
assign pma_checker__entries_T_187 = pma_checker__entries_WIRE_15[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_ae_final = pma_checker__entries_T_187; // @[TLB.scala:170:77]
assign pma_checker__entries_T_188 = pma_checker__entries_WIRE_15[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_ae_ptw = pma_checker__entries_T_188; // @[TLB.scala:170:77]
assign pma_checker__entries_T_189 = pma_checker__entries_WIRE_15[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_g = pma_checker__entries_T_189; // @[TLB.scala:170:77]
assign pma_checker__entries_T_190 = pma_checker__entries_WIRE_15[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_14_u = pma_checker__entries_T_190; // @[TLB.scala:170:77]
assign pma_checker__entries_T_191 = pma_checker__entries_WIRE_15[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_14_ppn = pma_checker__entries_T_191; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_214; // @[TLB.scala:170:77]
wire pma_checker__entries_T_213; // @[TLB.scala:170:77]
wire pma_checker__entries_T_212; // @[TLB.scala:170:77]
wire pma_checker__entries_T_211; // @[TLB.scala:170:77]
wire pma_checker__entries_T_210; // @[TLB.scala:170:77]
wire pma_checker__entries_T_209; // @[TLB.scala:170:77]
wire pma_checker__entries_T_208; // @[TLB.scala:170:77]
wire pma_checker__entries_T_207; // @[TLB.scala:170:77]
wire pma_checker__entries_T_206; // @[TLB.scala:170:77]
wire pma_checker__entries_T_205; // @[TLB.scala:170:77]
wire pma_checker__entries_T_204; // @[TLB.scala:170:77]
wire pma_checker__entries_T_203; // @[TLB.scala:170:77]
wire pma_checker__entries_T_202; // @[TLB.scala:170:77]
wire pma_checker__entries_T_201; // @[TLB.scala:170:77]
wire pma_checker__entries_T_200; // @[TLB.scala:170:77]
wire pma_checker__entries_T_199; // @[TLB.scala:170:77]
wire pma_checker__entries_T_198; // @[TLB.scala:170:77]
wire pma_checker__entries_T_197; // @[TLB.scala:170:77]
wire pma_checker__entries_T_196; // @[TLB.scala:170:77]
wire pma_checker__entries_T_195; // @[TLB.scala:170:77]
wire pma_checker__entries_T_194; // @[TLB.scala:170:77]
wire pma_checker__entries_T_193; // @[TLB.scala:170:77]
wire pma_checker__entries_T_192; // @[TLB.scala:170:77]
assign pma_checker__entries_T_192 = pma_checker__entries_WIRE_17[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_fragmented_superpage = pma_checker__entries_T_192; // @[TLB.scala:170:77]
assign pma_checker__entries_T_193 = pma_checker__entries_WIRE_17[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_c = pma_checker__entries_T_193; // @[TLB.scala:170:77]
assign pma_checker__entries_T_194 = pma_checker__entries_WIRE_17[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_eff = pma_checker__entries_T_194; // @[TLB.scala:170:77]
assign pma_checker__entries_T_195 = pma_checker__entries_WIRE_17[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_paa = pma_checker__entries_T_195; // @[TLB.scala:170:77]
assign pma_checker__entries_T_196 = pma_checker__entries_WIRE_17[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_pal = pma_checker__entries_T_196; // @[TLB.scala:170:77]
assign pma_checker__entries_T_197 = pma_checker__entries_WIRE_17[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_ppp = pma_checker__entries_T_197; // @[TLB.scala:170:77]
assign pma_checker__entries_T_198 = pma_checker__entries_WIRE_17[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_pr = pma_checker__entries_T_198; // @[TLB.scala:170:77]
assign pma_checker__entries_T_199 = pma_checker__entries_WIRE_17[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_px = pma_checker__entries_T_199; // @[TLB.scala:170:77]
assign pma_checker__entries_T_200 = pma_checker__entries_WIRE_17[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_pw = pma_checker__entries_T_200; // @[TLB.scala:170:77]
assign pma_checker__entries_T_201 = pma_checker__entries_WIRE_17[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_hr = pma_checker__entries_T_201; // @[TLB.scala:170:77]
assign pma_checker__entries_T_202 = pma_checker__entries_WIRE_17[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_hx = pma_checker__entries_T_202; // @[TLB.scala:170:77]
assign pma_checker__entries_T_203 = pma_checker__entries_WIRE_17[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_hw = pma_checker__entries_T_203; // @[TLB.scala:170:77]
assign pma_checker__entries_T_204 = pma_checker__entries_WIRE_17[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_sr = pma_checker__entries_T_204; // @[TLB.scala:170:77]
assign pma_checker__entries_T_205 = pma_checker__entries_WIRE_17[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_sx = pma_checker__entries_T_205; // @[TLB.scala:170:77]
assign pma_checker__entries_T_206 = pma_checker__entries_WIRE_17[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_sw = pma_checker__entries_T_206; // @[TLB.scala:170:77]
assign pma_checker__entries_T_207 = pma_checker__entries_WIRE_17[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_gf = pma_checker__entries_T_207; // @[TLB.scala:170:77]
assign pma_checker__entries_T_208 = pma_checker__entries_WIRE_17[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_pf = pma_checker__entries_T_208; // @[TLB.scala:170:77]
assign pma_checker__entries_T_209 = pma_checker__entries_WIRE_17[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_ae_stage2 = pma_checker__entries_T_209; // @[TLB.scala:170:77]
assign pma_checker__entries_T_210 = pma_checker__entries_WIRE_17[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_ae_final = pma_checker__entries_T_210; // @[TLB.scala:170:77]
assign pma_checker__entries_T_211 = pma_checker__entries_WIRE_17[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_ae_ptw = pma_checker__entries_T_211; // @[TLB.scala:170:77]
assign pma_checker__entries_T_212 = pma_checker__entries_WIRE_17[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_g = pma_checker__entries_T_212; // @[TLB.scala:170:77]
assign pma_checker__entries_T_213 = pma_checker__entries_WIRE_17[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_16_u = pma_checker__entries_T_213; // @[TLB.scala:170:77]
assign pma_checker__entries_T_214 = pma_checker__entries_WIRE_17[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_16_ppn = pma_checker__entries_T_214; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_237; // @[TLB.scala:170:77]
wire pma_checker__entries_T_236; // @[TLB.scala:170:77]
wire pma_checker__entries_T_235; // @[TLB.scala:170:77]
wire pma_checker__entries_T_234; // @[TLB.scala:170:77]
wire pma_checker__entries_T_233; // @[TLB.scala:170:77]
wire pma_checker__entries_T_232; // @[TLB.scala:170:77]
wire pma_checker__entries_T_231; // @[TLB.scala:170:77]
wire pma_checker__entries_T_230; // @[TLB.scala:170:77]
wire pma_checker__entries_T_229; // @[TLB.scala:170:77]
wire pma_checker__entries_T_228; // @[TLB.scala:170:77]
wire pma_checker__entries_T_227; // @[TLB.scala:170:77]
wire pma_checker__entries_T_226; // @[TLB.scala:170:77]
wire pma_checker__entries_T_225; // @[TLB.scala:170:77]
wire pma_checker__entries_T_224; // @[TLB.scala:170:77]
wire pma_checker__entries_T_223; // @[TLB.scala:170:77]
wire pma_checker__entries_T_222; // @[TLB.scala:170:77]
wire pma_checker__entries_T_221; // @[TLB.scala:170:77]
wire pma_checker__entries_T_220; // @[TLB.scala:170:77]
wire pma_checker__entries_T_219; // @[TLB.scala:170:77]
wire pma_checker__entries_T_218; // @[TLB.scala:170:77]
wire pma_checker__entries_T_217; // @[TLB.scala:170:77]
wire pma_checker__entries_T_216; // @[TLB.scala:170:77]
wire pma_checker__entries_T_215; // @[TLB.scala:170:77]
assign pma_checker__entries_T_215 = pma_checker__entries_WIRE_19[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_fragmented_superpage = pma_checker__entries_T_215; // @[TLB.scala:170:77]
assign pma_checker__entries_T_216 = pma_checker__entries_WIRE_19[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_c = pma_checker__entries_T_216; // @[TLB.scala:170:77]
assign pma_checker__entries_T_217 = pma_checker__entries_WIRE_19[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_eff = pma_checker__entries_T_217; // @[TLB.scala:170:77]
assign pma_checker__entries_T_218 = pma_checker__entries_WIRE_19[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_paa = pma_checker__entries_T_218; // @[TLB.scala:170:77]
assign pma_checker__entries_T_219 = pma_checker__entries_WIRE_19[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_pal = pma_checker__entries_T_219; // @[TLB.scala:170:77]
assign pma_checker__entries_T_220 = pma_checker__entries_WIRE_19[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_ppp = pma_checker__entries_T_220; // @[TLB.scala:170:77]
assign pma_checker__entries_T_221 = pma_checker__entries_WIRE_19[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_pr = pma_checker__entries_T_221; // @[TLB.scala:170:77]
assign pma_checker__entries_T_222 = pma_checker__entries_WIRE_19[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_px = pma_checker__entries_T_222; // @[TLB.scala:170:77]
assign pma_checker__entries_T_223 = pma_checker__entries_WIRE_19[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_pw = pma_checker__entries_T_223; // @[TLB.scala:170:77]
assign pma_checker__entries_T_224 = pma_checker__entries_WIRE_19[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_hr = pma_checker__entries_T_224; // @[TLB.scala:170:77]
assign pma_checker__entries_T_225 = pma_checker__entries_WIRE_19[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_hx = pma_checker__entries_T_225; // @[TLB.scala:170:77]
assign pma_checker__entries_T_226 = pma_checker__entries_WIRE_19[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_hw = pma_checker__entries_T_226; // @[TLB.scala:170:77]
assign pma_checker__entries_T_227 = pma_checker__entries_WIRE_19[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_sr = pma_checker__entries_T_227; // @[TLB.scala:170:77]
assign pma_checker__entries_T_228 = pma_checker__entries_WIRE_19[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_sx = pma_checker__entries_T_228; // @[TLB.scala:170:77]
assign pma_checker__entries_T_229 = pma_checker__entries_WIRE_19[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_sw = pma_checker__entries_T_229; // @[TLB.scala:170:77]
assign pma_checker__entries_T_230 = pma_checker__entries_WIRE_19[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_gf = pma_checker__entries_T_230; // @[TLB.scala:170:77]
assign pma_checker__entries_T_231 = pma_checker__entries_WIRE_19[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_pf = pma_checker__entries_T_231; // @[TLB.scala:170:77]
assign pma_checker__entries_T_232 = pma_checker__entries_WIRE_19[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_ae_stage2 = pma_checker__entries_T_232; // @[TLB.scala:170:77]
assign pma_checker__entries_T_233 = pma_checker__entries_WIRE_19[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_ae_final = pma_checker__entries_T_233; // @[TLB.scala:170:77]
assign pma_checker__entries_T_234 = pma_checker__entries_WIRE_19[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_ae_ptw = pma_checker__entries_T_234; // @[TLB.scala:170:77]
assign pma_checker__entries_T_235 = pma_checker__entries_WIRE_19[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_g = pma_checker__entries_T_235; // @[TLB.scala:170:77]
assign pma_checker__entries_T_236 = pma_checker__entries_WIRE_19[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_18_u = pma_checker__entries_T_236; // @[TLB.scala:170:77]
assign pma_checker__entries_T_237 = pma_checker__entries_WIRE_19[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_18_ppn = pma_checker__entries_T_237; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_260; // @[TLB.scala:170:77]
wire pma_checker__entries_T_259; // @[TLB.scala:170:77]
wire pma_checker__entries_T_258; // @[TLB.scala:170:77]
wire pma_checker__entries_T_257; // @[TLB.scala:170:77]
wire pma_checker__entries_T_256; // @[TLB.scala:170:77]
wire pma_checker__entries_T_255; // @[TLB.scala:170:77]
wire pma_checker__entries_T_254; // @[TLB.scala:170:77]
wire pma_checker__entries_T_253; // @[TLB.scala:170:77]
wire pma_checker__entries_T_252; // @[TLB.scala:170:77]
wire pma_checker__entries_T_251; // @[TLB.scala:170:77]
wire pma_checker__entries_T_250; // @[TLB.scala:170:77]
wire pma_checker__entries_T_249; // @[TLB.scala:170:77]
wire pma_checker__entries_T_248; // @[TLB.scala:170:77]
wire pma_checker__entries_T_247; // @[TLB.scala:170:77]
wire pma_checker__entries_T_246; // @[TLB.scala:170:77]
wire pma_checker__entries_T_245; // @[TLB.scala:170:77]
wire pma_checker__entries_T_244; // @[TLB.scala:170:77]
wire pma_checker__entries_T_243; // @[TLB.scala:170:77]
wire pma_checker__entries_T_242; // @[TLB.scala:170:77]
wire pma_checker__entries_T_241; // @[TLB.scala:170:77]
wire pma_checker__entries_T_240; // @[TLB.scala:170:77]
wire pma_checker__entries_T_239; // @[TLB.scala:170:77]
wire pma_checker__entries_T_238; // @[TLB.scala:170:77]
assign pma_checker__entries_T_238 = pma_checker__entries_WIRE_21[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_fragmented_superpage = pma_checker__entries_T_238; // @[TLB.scala:170:77]
assign pma_checker__entries_T_239 = pma_checker__entries_WIRE_21[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_c = pma_checker__entries_T_239; // @[TLB.scala:170:77]
assign pma_checker__entries_T_240 = pma_checker__entries_WIRE_21[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_eff = pma_checker__entries_T_240; // @[TLB.scala:170:77]
assign pma_checker__entries_T_241 = pma_checker__entries_WIRE_21[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_paa = pma_checker__entries_T_241; // @[TLB.scala:170:77]
assign pma_checker__entries_T_242 = pma_checker__entries_WIRE_21[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_pal = pma_checker__entries_T_242; // @[TLB.scala:170:77]
assign pma_checker__entries_T_243 = pma_checker__entries_WIRE_21[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_ppp = pma_checker__entries_T_243; // @[TLB.scala:170:77]
assign pma_checker__entries_T_244 = pma_checker__entries_WIRE_21[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_pr = pma_checker__entries_T_244; // @[TLB.scala:170:77]
assign pma_checker__entries_T_245 = pma_checker__entries_WIRE_21[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_px = pma_checker__entries_T_245; // @[TLB.scala:170:77]
assign pma_checker__entries_T_246 = pma_checker__entries_WIRE_21[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_pw = pma_checker__entries_T_246; // @[TLB.scala:170:77]
assign pma_checker__entries_T_247 = pma_checker__entries_WIRE_21[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_hr = pma_checker__entries_T_247; // @[TLB.scala:170:77]
assign pma_checker__entries_T_248 = pma_checker__entries_WIRE_21[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_hx = pma_checker__entries_T_248; // @[TLB.scala:170:77]
assign pma_checker__entries_T_249 = pma_checker__entries_WIRE_21[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_hw = pma_checker__entries_T_249; // @[TLB.scala:170:77]
assign pma_checker__entries_T_250 = pma_checker__entries_WIRE_21[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_sr = pma_checker__entries_T_250; // @[TLB.scala:170:77]
assign pma_checker__entries_T_251 = pma_checker__entries_WIRE_21[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_sx = pma_checker__entries_T_251; // @[TLB.scala:170:77]
assign pma_checker__entries_T_252 = pma_checker__entries_WIRE_21[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_sw = pma_checker__entries_T_252; // @[TLB.scala:170:77]
assign pma_checker__entries_T_253 = pma_checker__entries_WIRE_21[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_gf = pma_checker__entries_T_253; // @[TLB.scala:170:77]
assign pma_checker__entries_T_254 = pma_checker__entries_WIRE_21[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_pf = pma_checker__entries_T_254; // @[TLB.scala:170:77]
assign pma_checker__entries_T_255 = pma_checker__entries_WIRE_21[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_ae_stage2 = pma_checker__entries_T_255; // @[TLB.scala:170:77]
assign pma_checker__entries_T_256 = pma_checker__entries_WIRE_21[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_ae_final = pma_checker__entries_T_256; // @[TLB.scala:170:77]
assign pma_checker__entries_T_257 = pma_checker__entries_WIRE_21[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_ae_ptw = pma_checker__entries_T_257; // @[TLB.scala:170:77]
assign pma_checker__entries_T_258 = pma_checker__entries_WIRE_21[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_g = pma_checker__entries_T_258; // @[TLB.scala:170:77]
assign pma_checker__entries_T_259 = pma_checker__entries_WIRE_21[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_20_u = pma_checker__entries_T_259; // @[TLB.scala:170:77]
assign pma_checker__entries_T_260 = pma_checker__entries_WIRE_21[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_20_ppn = pma_checker__entries_T_260; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_283; // @[TLB.scala:170:77]
wire pma_checker__entries_T_282; // @[TLB.scala:170:77]
wire pma_checker__entries_T_281; // @[TLB.scala:170:77]
wire pma_checker__entries_T_280; // @[TLB.scala:170:77]
wire pma_checker__entries_T_279; // @[TLB.scala:170:77]
wire pma_checker__entries_T_278; // @[TLB.scala:170:77]
wire pma_checker__entries_T_277; // @[TLB.scala:170:77]
wire pma_checker__entries_T_276; // @[TLB.scala:170:77]
wire pma_checker__entries_T_275; // @[TLB.scala:170:77]
wire pma_checker__entries_T_274; // @[TLB.scala:170:77]
wire pma_checker__entries_T_273; // @[TLB.scala:170:77]
wire pma_checker__entries_T_272; // @[TLB.scala:170:77]
wire pma_checker__entries_T_271; // @[TLB.scala:170:77]
wire pma_checker__entries_T_270; // @[TLB.scala:170:77]
wire pma_checker__entries_T_269; // @[TLB.scala:170:77]
wire pma_checker__entries_T_268; // @[TLB.scala:170:77]
wire pma_checker__entries_T_267; // @[TLB.scala:170:77]
wire pma_checker__entries_T_266; // @[TLB.scala:170:77]
wire pma_checker__entries_T_265; // @[TLB.scala:170:77]
wire pma_checker__entries_T_264; // @[TLB.scala:170:77]
wire pma_checker__entries_T_263; // @[TLB.scala:170:77]
wire pma_checker__entries_T_262; // @[TLB.scala:170:77]
wire pma_checker__entries_T_261; // @[TLB.scala:170:77]
assign pma_checker__entries_T_261 = pma_checker__entries_WIRE_23[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_fragmented_superpage = pma_checker__entries_T_261; // @[TLB.scala:170:77]
assign pma_checker__entries_T_262 = pma_checker__entries_WIRE_23[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_c = pma_checker__entries_T_262; // @[TLB.scala:170:77]
assign pma_checker__entries_T_263 = pma_checker__entries_WIRE_23[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_eff = pma_checker__entries_T_263; // @[TLB.scala:170:77]
assign pma_checker__entries_T_264 = pma_checker__entries_WIRE_23[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_paa = pma_checker__entries_T_264; // @[TLB.scala:170:77]
assign pma_checker__entries_T_265 = pma_checker__entries_WIRE_23[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_pal = pma_checker__entries_T_265; // @[TLB.scala:170:77]
assign pma_checker__entries_T_266 = pma_checker__entries_WIRE_23[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_ppp = pma_checker__entries_T_266; // @[TLB.scala:170:77]
assign pma_checker__entries_T_267 = pma_checker__entries_WIRE_23[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_pr = pma_checker__entries_T_267; // @[TLB.scala:170:77]
assign pma_checker__entries_T_268 = pma_checker__entries_WIRE_23[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_px = pma_checker__entries_T_268; // @[TLB.scala:170:77]
assign pma_checker__entries_T_269 = pma_checker__entries_WIRE_23[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_pw = pma_checker__entries_T_269; // @[TLB.scala:170:77]
assign pma_checker__entries_T_270 = pma_checker__entries_WIRE_23[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_hr = pma_checker__entries_T_270; // @[TLB.scala:170:77]
assign pma_checker__entries_T_271 = pma_checker__entries_WIRE_23[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_hx = pma_checker__entries_T_271; // @[TLB.scala:170:77]
assign pma_checker__entries_T_272 = pma_checker__entries_WIRE_23[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_hw = pma_checker__entries_T_272; // @[TLB.scala:170:77]
assign pma_checker__entries_T_273 = pma_checker__entries_WIRE_23[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_sr = pma_checker__entries_T_273; // @[TLB.scala:170:77]
assign pma_checker__entries_T_274 = pma_checker__entries_WIRE_23[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_sx = pma_checker__entries_T_274; // @[TLB.scala:170:77]
assign pma_checker__entries_T_275 = pma_checker__entries_WIRE_23[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_sw = pma_checker__entries_T_275; // @[TLB.scala:170:77]
assign pma_checker__entries_T_276 = pma_checker__entries_WIRE_23[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_gf = pma_checker__entries_T_276; // @[TLB.scala:170:77]
assign pma_checker__entries_T_277 = pma_checker__entries_WIRE_23[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_pf = pma_checker__entries_T_277; // @[TLB.scala:170:77]
assign pma_checker__entries_T_278 = pma_checker__entries_WIRE_23[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_ae_stage2 = pma_checker__entries_T_278; // @[TLB.scala:170:77]
assign pma_checker__entries_T_279 = pma_checker__entries_WIRE_23[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_ae_final = pma_checker__entries_T_279; // @[TLB.scala:170:77]
assign pma_checker__entries_T_280 = pma_checker__entries_WIRE_23[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_ae_ptw = pma_checker__entries_T_280; // @[TLB.scala:170:77]
assign pma_checker__entries_T_281 = pma_checker__entries_WIRE_23[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_g = pma_checker__entries_T_281; // @[TLB.scala:170:77]
assign pma_checker__entries_T_282 = pma_checker__entries_WIRE_23[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_22_u = pma_checker__entries_T_282; // @[TLB.scala:170:77]
assign pma_checker__entries_T_283 = pma_checker__entries_WIRE_23[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_22_ppn = pma_checker__entries_T_283; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_T_306; // @[TLB.scala:170:77]
wire pma_checker__entries_T_305; // @[TLB.scala:170:77]
wire pma_checker__entries_T_304; // @[TLB.scala:170:77]
wire pma_checker__entries_T_303; // @[TLB.scala:170:77]
wire pma_checker__entries_T_302; // @[TLB.scala:170:77]
wire pma_checker__entries_T_301; // @[TLB.scala:170:77]
wire pma_checker__entries_T_300; // @[TLB.scala:170:77]
wire pma_checker__entries_T_299; // @[TLB.scala:170:77]
wire pma_checker__entries_T_298; // @[TLB.scala:170:77]
wire pma_checker__entries_T_297; // @[TLB.scala:170:77]
wire pma_checker__entries_T_296; // @[TLB.scala:170:77]
wire pma_checker__entries_T_295; // @[TLB.scala:170:77]
wire pma_checker__entries_T_294; // @[TLB.scala:170:77]
wire pma_checker__entries_T_293; // @[TLB.scala:170:77]
wire pma_checker__entries_T_292; // @[TLB.scala:170:77]
wire pma_checker__entries_T_291; // @[TLB.scala:170:77]
wire pma_checker__entries_T_290; // @[TLB.scala:170:77]
wire pma_checker__entries_T_289; // @[TLB.scala:170:77]
wire pma_checker__entries_T_288; // @[TLB.scala:170:77]
wire pma_checker__entries_T_287; // @[TLB.scala:170:77]
wire pma_checker__entries_T_286; // @[TLB.scala:170:77]
wire pma_checker__entries_T_285; // @[TLB.scala:170:77]
wire pma_checker__entries_T_284; // @[TLB.scala:170:77]
assign pma_checker__entries_T_284 = pma_checker__entries_WIRE_25[0]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_fragmented_superpage = pma_checker__entries_T_284; // @[TLB.scala:170:77]
assign pma_checker__entries_T_285 = pma_checker__entries_WIRE_25[1]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_c = pma_checker__entries_T_285; // @[TLB.scala:170:77]
assign pma_checker__entries_T_286 = pma_checker__entries_WIRE_25[2]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_eff = pma_checker__entries_T_286; // @[TLB.scala:170:77]
assign pma_checker__entries_T_287 = pma_checker__entries_WIRE_25[3]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_paa = pma_checker__entries_T_287; // @[TLB.scala:170:77]
assign pma_checker__entries_T_288 = pma_checker__entries_WIRE_25[4]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_pal = pma_checker__entries_T_288; // @[TLB.scala:170:77]
assign pma_checker__entries_T_289 = pma_checker__entries_WIRE_25[5]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_ppp = pma_checker__entries_T_289; // @[TLB.scala:170:77]
assign pma_checker__entries_T_290 = pma_checker__entries_WIRE_25[6]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_pr = pma_checker__entries_T_290; // @[TLB.scala:170:77]
assign pma_checker__entries_T_291 = pma_checker__entries_WIRE_25[7]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_px = pma_checker__entries_T_291; // @[TLB.scala:170:77]
assign pma_checker__entries_T_292 = pma_checker__entries_WIRE_25[8]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_pw = pma_checker__entries_T_292; // @[TLB.scala:170:77]
assign pma_checker__entries_T_293 = pma_checker__entries_WIRE_25[9]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_hr = pma_checker__entries_T_293; // @[TLB.scala:170:77]
assign pma_checker__entries_T_294 = pma_checker__entries_WIRE_25[10]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_hx = pma_checker__entries_T_294; // @[TLB.scala:170:77]
assign pma_checker__entries_T_295 = pma_checker__entries_WIRE_25[11]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_hw = pma_checker__entries_T_295; // @[TLB.scala:170:77]
assign pma_checker__entries_T_296 = pma_checker__entries_WIRE_25[12]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_sr = pma_checker__entries_T_296; // @[TLB.scala:170:77]
assign pma_checker__entries_T_297 = pma_checker__entries_WIRE_25[13]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_sx = pma_checker__entries_T_297; // @[TLB.scala:170:77]
assign pma_checker__entries_T_298 = pma_checker__entries_WIRE_25[14]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_sw = pma_checker__entries_T_298; // @[TLB.scala:170:77]
assign pma_checker__entries_T_299 = pma_checker__entries_WIRE_25[15]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_gf = pma_checker__entries_T_299; // @[TLB.scala:170:77]
assign pma_checker__entries_T_300 = pma_checker__entries_WIRE_25[16]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_pf = pma_checker__entries_T_300; // @[TLB.scala:170:77]
assign pma_checker__entries_T_301 = pma_checker__entries_WIRE_25[17]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_ae_stage2 = pma_checker__entries_T_301; // @[TLB.scala:170:77]
assign pma_checker__entries_T_302 = pma_checker__entries_WIRE_25[18]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_ae_final = pma_checker__entries_T_302; // @[TLB.scala:170:77]
assign pma_checker__entries_T_303 = pma_checker__entries_WIRE_25[19]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_ae_ptw = pma_checker__entries_T_303; // @[TLB.scala:170:77]
assign pma_checker__entries_T_304 = pma_checker__entries_WIRE_25[20]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_g = pma_checker__entries_T_304; // @[TLB.scala:170:77]
assign pma_checker__entries_T_305 = pma_checker__entries_WIRE_25[21]; // @[TLB.scala:170:77]
wire pma_checker__entries_WIRE_24_u = pma_checker__entries_T_305; // @[TLB.scala:170:77]
assign pma_checker__entries_T_306 = pma_checker__entries_WIRE_25[41:22]; // @[TLB.scala:170:77]
wire [19:0] pma_checker__entries_WIRE_24_ppn = pma_checker__entries_T_306; // @[TLB.scala:170:77]
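  // Each pma_checker__entries_WIRE_* block above unpacks one packed 42-bit TLB entry word
  // (TLB.scala:170) into its named fields.  The bit layout, LSB to MSB, is:
  //   [0] fragmented_superpage, [1] c, [2] eff, [3] paa, [4] pal, [5] ppp,
  //   [6] pr, [7] px, [8] pw, [9] hr, [10] hx, [11] hw, [12] sr, [13] sx, [14] sw,
  //   [15] gf, [16] pf, [17] ae_stage2, [18] ae_final, [19] ae_ptw, [20] g, [21] u,
  //   [41:22] ppn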
wire [1:0] pma_checker_ppn_res = _pma_checker_entries_barrier_8_io_y_ppn[19:18]; // @[package.scala:267:25]
wire pma_checker_ppn_ignore = pma_checker__ppn_ignore_T; // @[TLB.scala:197:{28,34}]
wire [26:0] pma_checker__ppn_T_1 = pma_checker_ppn_ignore ? pma_checker_vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30]
wire [26:0] pma_checker__ppn_T_2 = {pma_checker__ppn_T_1[26:20], pma_checker__ppn_T_1[19:0] | _pma_checker_entries_barrier_8_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_3 = pma_checker__ppn_T_2[17:9]; // @[TLB.scala:198:{47,58}]
wire [10:0] pma_checker__ppn_T_4 = {pma_checker_ppn_res, pma_checker__ppn_T_3}; // @[TLB.scala:195:26, :198:{18,58}]
wire [26:0] pma_checker__ppn_T_6 = {pma_checker__ppn_T_5[26:20], pma_checker__ppn_T_5[19:0] | _pma_checker_entries_barrier_8_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_7 = pma_checker__ppn_T_6[8:0]; // @[TLB.scala:198:{47,58}]
wire [19:0] pma_checker__ppn_T_8 = {pma_checker__ppn_T_4, pma_checker__ppn_T_7}; // @[TLB.scala:198:{18,58}]
wire [1:0] pma_checker_ppn_res_1 = _pma_checker_entries_barrier_9_io_y_ppn[19:18]; // @[package.scala:267:25]
wire pma_checker_ppn_ignore_2 = pma_checker__ppn_ignore_T_2; // @[TLB.scala:197:{28,34}]
wire [26:0] pma_checker__ppn_T_9 = pma_checker_ppn_ignore_2 ? pma_checker_vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30]
wire [26:0] pma_checker__ppn_T_10 = {pma_checker__ppn_T_9[26:20], pma_checker__ppn_T_9[19:0] | _pma_checker_entries_barrier_9_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_11 = pma_checker__ppn_T_10[17:9]; // @[TLB.scala:198:{47,58}]
wire [10:0] pma_checker__ppn_T_12 = {pma_checker_ppn_res_1, pma_checker__ppn_T_11}; // @[TLB.scala:195:26, :198:{18,58}]
wire [26:0] pma_checker__ppn_T_14 = {pma_checker__ppn_T_13[26:20], pma_checker__ppn_T_13[19:0] | _pma_checker_entries_barrier_9_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_15 = pma_checker__ppn_T_14[8:0]; // @[TLB.scala:198:{47,58}]
wire [19:0] pma_checker__ppn_T_16 = {pma_checker__ppn_T_12, pma_checker__ppn_T_15}; // @[TLB.scala:198:{18,58}]
wire [1:0] pma_checker_ppn_res_2 = _pma_checker_entries_barrier_10_io_y_ppn[19:18]; // @[package.scala:267:25]
wire pma_checker_ppn_ignore_4 = pma_checker__ppn_ignore_T_4; // @[TLB.scala:197:{28,34}]
wire [26:0] pma_checker__ppn_T_17 = pma_checker_ppn_ignore_4 ? pma_checker_vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30]
wire [26:0] pma_checker__ppn_T_18 = {pma_checker__ppn_T_17[26:20], pma_checker__ppn_T_17[19:0] | _pma_checker_entries_barrier_10_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_19 = pma_checker__ppn_T_18[17:9]; // @[TLB.scala:198:{47,58}]
wire [10:0] pma_checker__ppn_T_20 = {pma_checker_ppn_res_2, pma_checker__ppn_T_19}; // @[TLB.scala:195:26, :198:{18,58}]
wire [26:0] pma_checker__ppn_T_22 = {pma_checker__ppn_T_21[26:20], pma_checker__ppn_T_21[19:0] | _pma_checker_entries_barrier_10_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_23 = pma_checker__ppn_T_22[8:0]; // @[TLB.scala:198:{47,58}]
wire [19:0] pma_checker__ppn_T_24 = {pma_checker__ppn_T_20, pma_checker__ppn_T_23}; // @[TLB.scala:198:{18,58}]
wire [1:0] pma_checker_ppn_res_3 = _pma_checker_entries_barrier_11_io_y_ppn[19:18]; // @[package.scala:267:25]
wire pma_checker_ppn_ignore_6 = pma_checker__ppn_ignore_T_6; // @[TLB.scala:197:{28,34}]
wire [26:0] pma_checker__ppn_T_25 = pma_checker_ppn_ignore_6 ? pma_checker_vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30]
wire [26:0] pma_checker__ppn_T_26 = {pma_checker__ppn_T_25[26:20], pma_checker__ppn_T_25[19:0] | _pma_checker_entries_barrier_11_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_27 = pma_checker__ppn_T_26[17:9]; // @[TLB.scala:198:{47,58}]
wire [10:0] pma_checker__ppn_T_28 = {pma_checker_ppn_res_3, pma_checker__ppn_T_27}; // @[TLB.scala:195:26, :198:{18,58}]
wire [26:0] pma_checker__ppn_T_30 = {pma_checker__ppn_T_29[26:20], pma_checker__ppn_T_29[19:0] | _pma_checker_entries_barrier_11_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_31 = pma_checker__ppn_T_30[8:0]; // @[TLB.scala:198:{47,58}]
wire [19:0] pma_checker__ppn_T_32 = {pma_checker__ppn_T_28, pma_checker__ppn_T_31}; // @[TLB.scala:198:{18,58}]
wire [1:0] pma_checker_ppn_res_4 = _pma_checker_entries_barrier_12_io_y_ppn[19:18]; // @[package.scala:267:25]
wire [26:0] pma_checker__ppn_T_34 = {pma_checker__ppn_T_33[26:20], pma_checker__ppn_T_33[19:0] | _pma_checker_entries_barrier_12_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_35 = pma_checker__ppn_T_34[17:9]; // @[TLB.scala:198:{47,58}]
wire [10:0] pma_checker__ppn_T_36 = {pma_checker_ppn_res_4, pma_checker__ppn_T_35}; // @[TLB.scala:195:26, :198:{18,58}]
wire [26:0] pma_checker__ppn_T_38 = {pma_checker__ppn_T_37[26:20], pma_checker__ppn_T_37[19:0] | _pma_checker_entries_barrier_12_io_y_ppn}; // @[package.scala:267:25]
wire [8:0] pma_checker__ppn_T_39 = pma_checker__ppn_T_38[8:0]; // @[TLB.scala:198:{47,58}]
wire [19:0] pma_checker__ppn_T_40 = {pma_checker__ppn_T_36, pma_checker__ppn_T_39}; // @[TLB.scala:198:{18,58}]
wire [19:0] pma_checker__ppn_T_41 = pma_checker_vpn[19:0]; // @[TLB.scala:335:30, :502:125]
wire [19:0] pma_checker__ppn_T_55 = pma_checker__ppn_T_41; // @[Mux.scala:30:73]
wire [19:0] pma_checker__ppn_T_68 = pma_checker__ppn_T_55; // @[Mux.scala:30:73]
wire [19:0] pma_checker_ppn = pma_checker__ppn_T_68; // @[Mux.scala:30:73]
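  // Superpage PPN composition (TLB.scala:195-198): for each superpage entry the upper PPN
  // bits come from the stored entry and the translation levels it ignores are filled in
  // from the request VPN (pma_checker__ppn_T_8/_16/_24/_32/_40).  The hit-select mux has
  // been folded away in this elaboration, so pma_checker_ppn reduces to vpn[19:0].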
wire [1:0] pma_checker_ptw_ae_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_ae_ptw, _pma_checker_entries_barrier_1_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_ae_array_lo_lo = {pma_checker_ptw_ae_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_ae_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_ae_ptw, _pma_checker_entries_barrier_4_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_ae_array_lo_hi = {pma_checker_ptw_ae_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_ptw_ae_array_lo = {pma_checker_ptw_ae_array_lo_hi, pma_checker_ptw_ae_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_ptw_ae_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_ae_ptw, _pma_checker_entries_barrier_7_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_ae_array_hi_lo = {pma_checker_ptw_ae_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_ae_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_ae_ptw, _pma_checker_entries_barrier_9_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_ae_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_ae_ptw, _pma_checker_entries_barrier_11_io_y_ae_ptw}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_ptw_ae_array_hi_hi = {pma_checker_ptw_ae_array_hi_hi_hi, pma_checker_ptw_ae_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_ptw_ae_array_hi = {pma_checker_ptw_ae_array_hi_hi, pma_checker_ptw_ae_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__ptw_ae_array_T = {pma_checker_ptw_ae_array_hi, pma_checker_ptw_ae_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_ptw_ae_array = {1'h0, pma_checker__ptw_ae_array_T}; // @[package.scala:45:27]
wire [1:0] pma_checker_final_ae_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_ae_final, _pma_checker_entries_barrier_1_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_final_ae_array_lo_lo = {pma_checker_final_ae_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_final_ae_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_ae_final, _pma_checker_entries_barrier_4_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_final_ae_array_lo_hi = {pma_checker_final_ae_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_final_ae_array_lo = {pma_checker_final_ae_array_lo_hi, pma_checker_final_ae_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_final_ae_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_ae_final, _pma_checker_entries_barrier_7_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_final_ae_array_hi_lo = {pma_checker_final_ae_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_final_ae_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_ae_final, _pma_checker_entries_barrier_9_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_final_ae_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_ae_final, _pma_checker_entries_barrier_11_io_y_ae_final}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_final_ae_array_hi_hi = {pma_checker_final_ae_array_hi_hi_hi, pma_checker_final_ae_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_final_ae_array_hi = {pma_checker_final_ae_array_hi_hi, pma_checker_final_ae_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__final_ae_array_T = {pma_checker_final_ae_array_hi, pma_checker_final_ae_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_final_ae_array = {1'h0, pma_checker__final_ae_array_T}; // @[package.scala:45:27]
wire [1:0] pma_checker_ptw_pf_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_pf, _pma_checker_entries_barrier_1_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_pf_array_lo_lo = {pma_checker_ptw_pf_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_pf_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_pf, _pma_checker_entries_barrier_4_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_pf_array_lo_hi = {pma_checker_ptw_pf_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_ptw_pf_array_lo = {pma_checker_ptw_pf_array_lo_hi, pma_checker_ptw_pf_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_ptw_pf_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_pf, _pma_checker_entries_barrier_7_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_pf_array_hi_lo = {pma_checker_ptw_pf_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_pf_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_pf, _pma_checker_entries_barrier_9_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_pf_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_pf, _pma_checker_entries_barrier_11_io_y_pf}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_ptw_pf_array_hi_hi = {pma_checker_ptw_pf_array_hi_hi_hi, pma_checker_ptw_pf_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_ptw_pf_array_hi = {pma_checker_ptw_pf_array_hi_hi, pma_checker_ptw_pf_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__ptw_pf_array_T = {pma_checker_ptw_pf_array_hi, pma_checker_ptw_pf_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_ptw_pf_array = {1'h0, pma_checker__ptw_pf_array_T}; // @[package.scala:45:27]
wire [1:0] pma_checker_ptw_gf_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_gf, _pma_checker_entries_barrier_1_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_gf_array_lo_lo = {pma_checker_ptw_gf_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_gf_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_gf, _pma_checker_entries_barrier_4_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_gf_array_lo_hi = {pma_checker_ptw_gf_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_ptw_gf_array_lo = {pma_checker_ptw_gf_array_lo_hi, pma_checker_ptw_gf_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_ptw_gf_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_gf, _pma_checker_entries_barrier_7_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ptw_gf_array_hi_lo = {pma_checker_ptw_gf_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_gf_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_gf, _pma_checker_entries_barrier_9_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ptw_gf_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_gf, _pma_checker_entries_barrier_11_io_y_gf}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_ptw_gf_array_hi_hi = {pma_checker_ptw_gf_array_hi_hi_hi, pma_checker_ptw_gf_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_ptw_gf_array_hi = {pma_checker_ptw_gf_array_hi_hi, pma_checker_ptw_gf_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__ptw_gf_array_T = {pma_checker_ptw_gf_array_hi, pma_checker_ptw_gf_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_ptw_gf_array = {1'h0, pma_checker__ptw_gf_array_T}; // @[package.scala:45:27]
wire [13:0] pma_checker__gf_ld_array_T_3 = pma_checker_ptw_gf_array; // @[TLB.scala:509:25, :600:82]
wire [13:0] pma_checker__gf_st_array_T_2 = pma_checker_ptw_gf_array; // @[TLB.scala:509:25, :601:63]
wire [13:0] pma_checker__gf_inst_array_T_1 = pma_checker_ptw_gf_array; // @[TLB.scala:509:25, :602:46]
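  // Fault summaries: the per-entry ae_ptw / ae_final / pf / gf bits of the 13 entries are
  // concatenated LSB-first (bit 0 = entries_barrier, bit 12 = entries_barrier_12) and
  // zero-extended to 14 bits; ptw_gf_array also feeds the gf_ld/st/inst arrays directly.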
wire pma_checker__priv_rw_ok_T = ~pma_checker_priv_s; // @[TLB.scala:370:20, :513:24]
wire pma_checker__priv_rw_ok_T_1 = pma_checker__priv_rw_ok_T; // @[TLB.scala:513:{24,32}]
wire [1:0] _GEN_7 = {_pma_checker_entries_barrier_2_io_y_u, _pma_checker_entries_barrier_1_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_priv_rw_ok_lo_lo_hi; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_lo_lo_hi = _GEN_7; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_rw_ok_lo_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_lo_lo_hi_1 = _GEN_7; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_lo_lo_hi; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_lo_lo_hi = _GEN_7; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_lo_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_lo_lo_hi_1 = _GEN_7; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_rw_ok_lo_lo = {pma_checker_priv_rw_ok_lo_lo_hi, _pma_checker_entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_8 = {_pma_checker_entries_barrier_5_io_y_u, _pma_checker_entries_barrier_4_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_priv_rw_ok_lo_hi_hi; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_lo_hi_hi = _GEN_8; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_rw_ok_lo_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_lo_hi_hi_1 = _GEN_8; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_lo_hi_hi; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_lo_hi_hi = _GEN_8; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_lo_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_lo_hi_hi_1 = _GEN_8; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_rw_ok_lo_hi = {pma_checker_priv_rw_ok_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_priv_rw_ok_lo = {pma_checker_priv_rw_ok_lo_hi, pma_checker_priv_rw_ok_lo_lo}; // @[package.scala:45:27]
wire [1:0] _GEN_9 = {_pma_checker_entries_barrier_8_io_y_u, _pma_checker_entries_barrier_7_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_priv_rw_ok_hi_lo_hi; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_hi_lo_hi = _GEN_9; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_rw_ok_hi_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_hi_lo_hi_1 = _GEN_9; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_hi_lo_hi; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_hi_lo_hi = _GEN_9; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_hi_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_hi_lo_hi_1 = _GEN_9; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_rw_ok_hi_lo = {pma_checker_priv_rw_ok_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_10 = {_pma_checker_entries_barrier_10_io_y_u, _pma_checker_entries_barrier_9_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_priv_rw_ok_hi_hi_lo; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_hi_hi_lo = _GEN_10; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_rw_ok_hi_hi_lo_1; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_hi_hi_lo_1 = _GEN_10; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_hi_hi_lo; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_hi_hi_lo = _GEN_10; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_hi_hi_lo_1; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_hi_hi_lo_1 = _GEN_10; // @[package.scala:45:27]
wire [1:0] _GEN_11 = {_pma_checker_entries_barrier_12_io_y_u, _pma_checker_entries_barrier_11_io_y_u}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_priv_rw_ok_hi_hi_hi; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_hi_hi_hi = _GEN_11; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_rw_ok_hi_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_rw_ok_hi_hi_hi_1 = _GEN_11; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_hi_hi_hi; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_hi_hi_hi = _GEN_11; // @[package.scala:45:27]
wire [1:0] pma_checker_priv_x_ok_hi_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_priv_x_ok_hi_hi_hi_1 = _GEN_11; // @[package.scala:45:27]
wire [3:0] pma_checker_priv_rw_ok_hi_hi = {pma_checker_priv_rw_ok_hi_hi_hi, pma_checker_priv_rw_ok_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_priv_rw_ok_hi = {pma_checker_priv_rw_ok_hi_hi, pma_checker_priv_rw_ok_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_rw_ok_T_2 = {pma_checker_priv_rw_ok_hi, pma_checker_priv_rw_ok_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_rw_ok_T_3 = pma_checker__priv_rw_ok_T_1 ? pma_checker__priv_rw_ok_T_2 : 13'h0; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_rw_ok_lo_lo_1 = {pma_checker_priv_rw_ok_lo_lo_hi_1, _pma_checker_entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_priv_rw_ok_lo_hi_1 = {pma_checker_priv_rw_ok_lo_hi_hi_1, _pma_checker_entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_priv_rw_ok_lo_1 = {pma_checker_priv_rw_ok_lo_hi_1, pma_checker_priv_rw_ok_lo_lo_1}; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_rw_ok_hi_lo_1 = {pma_checker_priv_rw_ok_hi_lo_hi_1, _pma_checker_entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_priv_rw_ok_hi_hi_1 = {pma_checker_priv_rw_ok_hi_hi_hi_1, pma_checker_priv_rw_ok_hi_hi_lo_1}; // @[package.scala:45:27]
wire [6:0] pma_checker_priv_rw_ok_hi_1 = {pma_checker_priv_rw_ok_hi_hi_1, pma_checker_priv_rw_ok_hi_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_rw_ok_T_4 = {pma_checker_priv_rw_ok_hi_1, pma_checker_priv_rw_ok_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_rw_ok_T_5 = ~pma_checker__priv_rw_ok_T_4; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_rw_ok_T_6 = pma_checker_priv_s ? pma_checker__priv_rw_ok_T_5 : 13'h0; // @[TLB.scala:370:20, :513:{75,84}]
wire [12:0] pma_checker_priv_rw_ok = pma_checker__priv_rw_ok_T_3 | pma_checker__priv_rw_ok_T_6; // @[TLB.scala:513:{23,70,75}]
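  // Load/store privilege gate (TLB.scala:513): with priv_s low an entry is eligible when
  // its u bit is set; with priv_s high, when u is clear.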
wire [2:0] pma_checker_priv_x_ok_lo_lo = {pma_checker_priv_x_ok_lo_lo_hi, _pma_checker_entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_priv_x_ok_lo_hi = {pma_checker_priv_x_ok_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_priv_x_ok_lo = {pma_checker_priv_x_ok_lo_hi, pma_checker_priv_x_ok_lo_lo}; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_x_ok_hi_lo = {pma_checker_priv_x_ok_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_priv_x_ok_hi_hi = {pma_checker_priv_x_ok_hi_hi_hi, pma_checker_priv_x_ok_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_priv_x_ok_hi = {pma_checker_priv_x_ok_hi_hi, pma_checker_priv_x_ok_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_x_ok_T = {pma_checker_priv_x_ok_hi, pma_checker_priv_x_ok_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_x_ok_T_1 = ~pma_checker__priv_x_ok_T; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_x_ok_lo_lo_1 = {pma_checker_priv_x_ok_lo_lo_hi_1, _pma_checker_entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_priv_x_ok_lo_hi_1 = {pma_checker_priv_x_ok_lo_hi_hi_1, _pma_checker_entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_priv_x_ok_lo_1 = {pma_checker_priv_x_ok_lo_hi_1, pma_checker_priv_x_ok_lo_lo_1}; // @[package.scala:45:27]
wire [2:0] pma_checker_priv_x_ok_hi_lo_1 = {pma_checker_priv_x_ok_hi_lo_hi_1, _pma_checker_entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_priv_x_ok_hi_hi_1 = {pma_checker_priv_x_ok_hi_hi_hi_1, pma_checker_priv_x_ok_hi_hi_lo_1}; // @[package.scala:45:27]
wire [6:0] pma_checker_priv_x_ok_hi_1 = {pma_checker_priv_x_ok_hi_hi_1, pma_checker_priv_x_ok_hi_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker__priv_x_ok_T_2 = {pma_checker_priv_x_ok_hi_1, pma_checker_priv_x_ok_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker_priv_x_ok = pma_checker_priv_s ? pma_checker__priv_x_ok_T_1 : pma_checker__priv_x_ok_T_2; // @[package.scala:45:27]
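  // Execute privilege gate: pma_checker_priv_x_ok selects the inverted u bits in
  // supervisor mode and the u bits otherwise.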
wire [1:0] pma_checker_stage1_bypass_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_ae_stage2, _pma_checker_entries_barrier_1_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_stage1_bypass_lo_lo = {pma_checker_stage1_bypass_lo_lo_hi, _pma_checker_entries_barrier_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_stage1_bypass_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_ae_stage2, _pma_checker_entries_barrier_4_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_stage1_bypass_lo_hi = {pma_checker_stage1_bypass_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_stage1_bypass_lo = {pma_checker_stage1_bypass_lo_hi, pma_checker_stage1_bypass_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_stage1_bypass_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_ae_stage2, _pma_checker_entries_barrier_7_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_stage1_bypass_hi_lo = {pma_checker_stage1_bypass_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_stage1_bypass_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_ae_stage2, _pma_checker_entries_barrier_9_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_stage1_bypass_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_ae_stage2, _pma_checker_entries_barrier_11_io_y_ae_stage2}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_stage1_bypass_hi_hi = {pma_checker_stage1_bypass_hi_hi_hi, pma_checker_stage1_bypass_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_stage1_bypass_hi = {pma_checker_stage1_bypass_hi_hi, pma_checker_stage1_bypass_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__stage1_bypass_T_3 = {pma_checker_stage1_bypass_hi, pma_checker_stage1_bypass_lo}; // @[package.scala:45:27]
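  // pma_checker__stage1_bypass_T_3 collects the per-entry ae_stage2 bits into a 13-bit
  // vector (bit 0 = entries_barrier, bit 12 = entries_barrier_12).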
wire [1:0] pma_checker_r_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_sr, _pma_checker_entries_barrier_1_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_r_array_lo_lo = {pma_checker_r_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_sr, _pma_checker_entries_barrier_4_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_r_array_lo_hi = {pma_checker_r_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_r_array_lo = {pma_checker_r_array_lo_hi, pma_checker_r_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_r_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_sr, _pma_checker_entries_barrier_7_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_r_array_hi_lo = {pma_checker_r_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_sr, _pma_checker_entries_barrier_9_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_sr, _pma_checker_entries_barrier_11_io_y_sr}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_r_array_hi_hi = {pma_checker_r_array_hi_hi_hi, pma_checker_r_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_r_array_hi = {pma_checker_r_array_hi_hi, pma_checker_r_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__r_array_T = {pma_checker_r_array_hi, pma_checker_r_array_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__r_array_T_3 = pma_checker__r_array_T; // @[package.scala:45:27]
wire [1:0] _GEN_12 = {_pma_checker_entries_barrier_2_io_y_sx, _pma_checker_entries_barrier_1_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_lo_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_r_array_lo_lo_hi_1 = _GEN_12; // @[package.scala:45:27]
wire [1:0] pma_checker_x_array_lo_lo_hi; // @[package.scala:45:27]
assign pma_checker_x_array_lo_lo_hi = _GEN_12; // @[package.scala:45:27]
wire [2:0] pma_checker_r_array_lo_lo_1 = {pma_checker_r_array_lo_lo_hi_1, _pma_checker_entries_barrier_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_13 = {_pma_checker_entries_barrier_5_io_y_sx, _pma_checker_entries_barrier_4_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_lo_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_r_array_lo_hi_hi_1 = _GEN_13; // @[package.scala:45:27]
wire [1:0] pma_checker_x_array_lo_hi_hi; // @[package.scala:45:27]
assign pma_checker_x_array_lo_hi_hi = _GEN_13; // @[package.scala:45:27]
wire [2:0] pma_checker_r_array_lo_hi_1 = {pma_checker_r_array_lo_hi_hi_1, _pma_checker_entries_barrier_3_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_r_array_lo_1 = {pma_checker_r_array_lo_hi_1, pma_checker_r_array_lo_lo_1}; // @[package.scala:45:27]
wire [1:0] _GEN_14 = {_pma_checker_entries_barrier_8_io_y_sx, _pma_checker_entries_barrier_7_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_hi_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_r_array_hi_lo_hi_1 = _GEN_14; // @[package.scala:45:27]
wire [1:0] pma_checker_x_array_hi_lo_hi; // @[package.scala:45:27]
assign pma_checker_x_array_hi_lo_hi = _GEN_14; // @[package.scala:45:27]
wire [2:0] pma_checker_r_array_hi_lo_1 = {pma_checker_r_array_hi_lo_hi_1, _pma_checker_entries_barrier_6_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_15 = {_pma_checker_entries_barrier_10_io_y_sx, _pma_checker_entries_barrier_9_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_hi_hi_lo_1; // @[package.scala:45:27]
assign pma_checker_r_array_hi_hi_lo_1 = _GEN_15; // @[package.scala:45:27]
wire [1:0] pma_checker_x_array_hi_hi_lo; // @[package.scala:45:27]
assign pma_checker_x_array_hi_hi_lo = _GEN_15; // @[package.scala:45:27]
wire [1:0] _GEN_16 = {_pma_checker_entries_barrier_12_io_y_sx, _pma_checker_entries_barrier_11_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_r_array_hi_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_r_array_hi_hi_hi_1 = _GEN_16; // @[package.scala:45:27]
wire [1:0] pma_checker_x_array_hi_hi_hi; // @[package.scala:45:27]
assign pma_checker_x_array_hi_hi_hi = _GEN_16; // @[package.scala:45:27]
wire [3:0] pma_checker_r_array_hi_hi_1 = {pma_checker_r_array_hi_hi_hi_1, pma_checker_r_array_hi_hi_lo_1}; // @[package.scala:45:27]
wire [6:0] pma_checker_r_array_hi_1 = {pma_checker_r_array_hi_hi_1, pma_checker_r_array_hi_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker__r_array_T_1 = {pma_checker_r_array_hi_1, pma_checker_r_array_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker__r_array_T_4 = pma_checker_priv_rw_ok & pma_checker__r_array_T_3; // @[TLB.scala:513:70, :520:{41,69}]
wire [12:0] pma_checker__r_array_T_5 = pma_checker__r_array_T_4; // @[TLB.scala:520:{41,113}]
wire [13:0] pma_checker_r_array = {1'h1, pma_checker__r_array_T_5}; // @[TLB.scala:520:{20,113}]
wire [13:0] pma_checker__pf_ld_array_T = pma_checker_r_array; // @[TLB.scala:520:20, :597:41]
wire [1:0] pma_checker_w_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_sw, _pma_checker_entries_barrier_1_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_w_array_lo_lo = {pma_checker_w_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_w_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_sw, _pma_checker_entries_barrier_4_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_w_array_lo_hi = {pma_checker_w_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_w_array_lo = {pma_checker_w_array_lo_hi, pma_checker_w_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_w_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_sw, _pma_checker_entries_barrier_7_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_w_array_hi_lo = {pma_checker_w_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_w_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_sw, _pma_checker_entries_barrier_9_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_w_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_sw, _pma_checker_entries_barrier_11_io_y_sw}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_w_array_hi_hi = {pma_checker_w_array_hi_hi_hi, pma_checker_w_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_w_array_hi = {pma_checker_w_array_hi_hi, pma_checker_w_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__w_array_T = {pma_checker_w_array_hi, pma_checker_w_array_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__w_array_T_1 = pma_checker_priv_rw_ok & pma_checker__w_array_T; // @[package.scala:45:27]
wire [12:0] pma_checker__w_array_T_2 = pma_checker__w_array_T_1; // @[TLB.scala:521:{41,69}]
wire [13:0] pma_checker_w_array = {1'h1, pma_checker__w_array_T_2}; // @[TLB.scala:521:{20,69}]
wire [2:0] pma_checker_x_array_lo_lo = {pma_checker_x_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_x_array_lo_hi = {pma_checker_x_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_x_array_lo = {pma_checker_x_array_lo_hi, pma_checker_x_array_lo_lo}; // @[package.scala:45:27]
wire [2:0] pma_checker_x_array_hi_lo = {pma_checker_x_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_sx}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_x_array_hi_hi = {pma_checker_x_array_hi_hi_hi, pma_checker_x_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_x_array_hi = {pma_checker_x_array_hi_hi, pma_checker_x_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__x_array_T = {pma_checker_x_array_hi, pma_checker_x_array_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__x_array_T_1 = pma_checker_priv_x_ok & pma_checker__x_array_T; // @[package.scala:45:27]
wire [12:0] pma_checker__x_array_T_2 = pma_checker__x_array_T_1; // @[TLB.scala:522:{40,68}]
wire [13:0] pma_checker_x_array = {1'h1, pma_checker__x_array_T_2}; // @[TLB.scala:522:{20,68}]
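  // Permission vectors (TLB.scala:520-522): r_array, w_array and x_array AND the
  // privilege gates with the per-entry sr / sw / sx bits; bit 13 is hard-wired to 1 by
  // the Cat(true.B, ...) prefix in the Chisel source.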
wire [1:0] pma_checker_hr_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_hr, _pma_checker_entries_barrier_1_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hr_array_lo_lo = {pma_checker_hr_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_hr, _pma_checker_entries_barrier_4_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hr_array_lo_hi = {pma_checker_hr_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_hr_array_lo = {pma_checker_hr_array_lo_hi, pma_checker_hr_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_hr_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_hr, _pma_checker_entries_barrier_7_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hr_array_hi_lo = {pma_checker_hr_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_hr, _pma_checker_entries_barrier_9_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_hr, _pma_checker_entries_barrier_11_io_y_hr}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_hr_array_hi_hi = {pma_checker_hr_array_hi_hi_hi, pma_checker_hr_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_hr_array_hi = {pma_checker_hr_array_hi_hi, pma_checker_hr_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__hr_array_T = {pma_checker_hr_array_hi, pma_checker_hr_array_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__hr_array_T_3 = pma_checker__hr_array_T; // @[package.scala:45:27]
wire [1:0] _GEN_17 = {_pma_checker_entries_barrier_2_io_y_hx, _pma_checker_entries_barrier_1_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_lo_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_hr_array_lo_lo_hi_1 = _GEN_17; // @[package.scala:45:27]
wire [1:0] pma_checker_hx_array_lo_lo_hi; // @[package.scala:45:27]
assign pma_checker_hx_array_lo_lo_hi = _GEN_17; // @[package.scala:45:27]
wire [2:0] pma_checker_hr_array_lo_lo_1 = {pma_checker_hr_array_lo_lo_hi_1, _pma_checker_entries_barrier_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_18 = {_pma_checker_entries_barrier_5_io_y_hx, _pma_checker_entries_barrier_4_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_lo_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_hr_array_lo_hi_hi_1 = _GEN_18; // @[package.scala:45:27]
wire [1:0] pma_checker_hx_array_lo_hi_hi; // @[package.scala:45:27]
assign pma_checker_hx_array_lo_hi_hi = _GEN_18; // @[package.scala:45:27]
wire [2:0] pma_checker_hr_array_lo_hi_1 = {pma_checker_hr_array_lo_hi_hi_1, _pma_checker_entries_barrier_3_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_hr_array_lo_1 = {pma_checker_hr_array_lo_hi_1, pma_checker_hr_array_lo_lo_1}; // @[package.scala:45:27]
wire [1:0] _GEN_19 = {_pma_checker_entries_barrier_8_io_y_hx, _pma_checker_entries_barrier_7_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_hi_lo_hi_1; // @[package.scala:45:27]
assign pma_checker_hr_array_hi_lo_hi_1 = _GEN_19; // @[package.scala:45:27]
wire [1:0] pma_checker_hx_array_hi_lo_hi; // @[package.scala:45:27]
assign pma_checker_hx_array_hi_lo_hi = _GEN_19; // @[package.scala:45:27]
wire [2:0] pma_checker_hr_array_hi_lo_1 = {pma_checker_hr_array_hi_lo_hi_1, _pma_checker_entries_barrier_6_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_20 = {_pma_checker_entries_barrier_10_io_y_hx, _pma_checker_entries_barrier_9_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_hi_hi_lo_1; // @[package.scala:45:27]
assign pma_checker_hr_array_hi_hi_lo_1 = _GEN_20; // @[package.scala:45:27]
wire [1:0] pma_checker_hx_array_hi_hi_lo; // @[package.scala:45:27]
assign pma_checker_hx_array_hi_hi_lo = _GEN_20; // @[package.scala:45:27]
wire [1:0] _GEN_21 = {_pma_checker_entries_barrier_12_io_y_hx, _pma_checker_entries_barrier_11_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hr_array_hi_hi_hi_1; // @[package.scala:45:27]
assign pma_checker_hr_array_hi_hi_hi_1 = _GEN_21; // @[package.scala:45:27]
wire [1:0] pma_checker_hx_array_hi_hi_hi; // @[package.scala:45:27]
assign pma_checker_hx_array_hi_hi_hi = _GEN_21; // @[package.scala:45:27]
wire [3:0] pma_checker_hr_array_hi_hi_1 = {pma_checker_hr_array_hi_hi_hi_1, pma_checker_hr_array_hi_hi_lo_1}; // @[package.scala:45:27]
wire [6:0] pma_checker_hr_array_hi_1 = {pma_checker_hr_array_hi_hi_1, pma_checker_hr_array_hi_lo_1}; // @[package.scala:45:27]
wire [12:0] pma_checker__hr_array_T_1 = {pma_checker_hr_array_hi_1, pma_checker_hr_array_lo_1}; // @[package.scala:45:27]
wire [1:0] pma_checker_hw_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_hw, _pma_checker_entries_barrier_1_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hw_array_lo_lo = {pma_checker_hw_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hw_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_hw, _pma_checker_entries_barrier_4_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hw_array_lo_hi = {pma_checker_hw_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_hw_array_lo = {pma_checker_hw_array_lo_hi, pma_checker_hw_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_hw_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_hw, _pma_checker_entries_barrier_7_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hw_array_hi_lo = {pma_checker_hw_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hw_array_hi_hi_lo = {_pma_checker_entries_barrier_10_io_y_hw, _pma_checker_entries_barrier_9_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_hw_array_hi_hi_hi = {_pma_checker_entries_barrier_12_io_y_hw, _pma_checker_entries_barrier_11_io_y_hw}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_hw_array_hi_hi = {pma_checker_hw_array_hi_hi_hi, pma_checker_hw_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_hw_array_hi = {pma_checker_hw_array_hi_hi, pma_checker_hw_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__hw_array_T = {pma_checker_hw_array_hi, pma_checker_hw_array_lo}; // @[package.scala:45:27]
wire [2:0] pma_checker_hx_array_lo_lo = {pma_checker_hx_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_hx_array_lo_hi = {pma_checker_hx_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_hx_array_lo = {pma_checker_hx_array_lo_hi, pma_checker_hx_array_lo_lo}; // @[package.scala:45:27]
wire [2:0] pma_checker_hx_array_hi_lo = {pma_checker_hx_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_hx}; // @[package.scala:45:27, :267:25]
wire [3:0] pma_checker_hx_array_hi_hi = {pma_checker_hx_array_hi_hi_hi, pma_checker_hx_array_hi_hi_lo}; // @[package.scala:45:27]
wire [6:0] pma_checker_hx_array_hi = {pma_checker_hx_array_hi_hi, pma_checker_hx_array_hi_lo}; // @[package.scala:45:27]
wire [12:0] pma_checker__hx_array_T = {pma_checker_hx_array_hi, pma_checker_hx_array_lo}; // @[package.scala:45:27]
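  // Physical permission vectors (pr/pw/px): the two MSBs replicate prot_{r,w,x} for the
  // passthrough entries and the low 12 bits come from the TLB entry barriers; each vector is
  // then masked by ~(ptw_ae_array | final_ae_array) so entries that already took an access
  // exception grant no permission (TLB.scala:529-533).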
wire [1:0] pma_checker__pr_array_T = {2{pma_checker_prot_r}}; // @[TLB.scala:429:55, :529:26]
wire [1:0] pma_checker_pr_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_pr, _pma_checker_entries_barrier_1_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pr_array_lo_lo = {pma_checker_pr_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_pr_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_pr, _pma_checker_entries_barrier_4_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pr_array_lo_hi = {pma_checker_pr_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_pr_array_lo = {pma_checker_pr_array_lo_hi, pma_checker_pr_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_pr_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_pr, _pma_checker_entries_barrier_7_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pr_array_hi_lo = {pma_checker_pr_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_pr_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_pr, _pma_checker_entries_barrier_10_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pr_array_hi_hi = {pma_checker_pr_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_pr}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_pr_array_hi = {pma_checker_pr_array_hi_hi, pma_checker_pr_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__pr_array_T_1 = {pma_checker_pr_array_hi, pma_checker_pr_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker__pr_array_T_2 = {pma_checker__pr_array_T, pma_checker__pr_array_T_1}; // @[package.scala:45:27]
wire [13:0] _GEN_22 = pma_checker_ptw_ae_array | pma_checker_final_ae_array; // @[TLB.scala:506:25, :507:27, :529:104]
wire [13:0] pma_checker__pr_array_T_3; // @[TLB.scala:529:104]
assign pma_checker__pr_array_T_3 = _GEN_22; // @[TLB.scala:529:104]
wire [13:0] pma_checker__pw_array_T_3; // @[TLB.scala:531:104]
assign pma_checker__pw_array_T_3 = _GEN_22; // @[TLB.scala:529:104, :531:104]
wire [13:0] pma_checker__px_array_T_3; // @[TLB.scala:533:104]
assign pma_checker__px_array_T_3 = _GEN_22; // @[TLB.scala:529:104, :533:104]
wire [13:0] pma_checker__pr_array_T_4 = ~pma_checker__pr_array_T_3; // @[TLB.scala:529:{89,104}]
wire [13:0] pma_checker_pr_array = pma_checker__pr_array_T_2 & pma_checker__pr_array_T_4; // @[TLB.scala:529:{21,87,89}]
wire [1:0] pma_checker__pw_array_T = {2{pma_checker_prot_w}}; // @[TLB.scala:430:55, :531:26]
wire [1:0] pma_checker_pw_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_pw, _pma_checker_entries_barrier_1_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pw_array_lo_lo = {pma_checker_pw_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_pw_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_pw, _pma_checker_entries_barrier_4_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pw_array_lo_hi = {pma_checker_pw_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_pw_array_lo = {pma_checker_pw_array_lo_hi, pma_checker_pw_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_pw_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_pw, _pma_checker_entries_barrier_7_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pw_array_hi_lo = {pma_checker_pw_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_pw_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_pw, _pma_checker_entries_barrier_10_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pw_array_hi_hi = {pma_checker_pw_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_pw}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_pw_array_hi = {pma_checker_pw_array_hi_hi, pma_checker_pw_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__pw_array_T_1 = {pma_checker_pw_array_hi, pma_checker_pw_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker__pw_array_T_2 = {pma_checker__pw_array_T, pma_checker__pw_array_T_1}; // @[package.scala:45:27]
wire [13:0] pma_checker__pw_array_T_4 = ~pma_checker__pw_array_T_3; // @[TLB.scala:531:{89,104}]
wire [13:0] pma_checker_pw_array = pma_checker__pw_array_T_2 & pma_checker__pw_array_T_4; // @[TLB.scala:531:{21,87,89}]
wire [1:0] pma_checker__px_array_T = {2{pma_checker_prot_x}}; // @[TLB.scala:434:55, :533:26]
wire [1:0] pma_checker_px_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_px, _pma_checker_entries_barrier_1_io_y_px}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_px_array_lo_lo = {pma_checker_px_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_px}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_px_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_px, _pma_checker_entries_barrier_4_io_y_px}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_px_array_lo_hi = {pma_checker_px_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_px}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_px_array_lo = {pma_checker_px_array_lo_hi, pma_checker_px_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_px_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_px, _pma_checker_entries_barrier_7_io_y_px}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_px_array_hi_lo = {pma_checker_px_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_px}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_px_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_px, _pma_checker_entries_barrier_10_io_y_px}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_px_array_hi_hi = {pma_checker_px_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_px}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_px_array_hi = {pma_checker_px_array_hi_hi, pma_checker_px_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__px_array_T_1 = {pma_checker_px_array_hi, pma_checker_px_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker__px_array_T_2 = {pma_checker__px_array_T, pma_checker__px_array_T_1}; // @[package.scala:45:27]
wire [13:0] pma_checker__px_array_T_4 = ~pma_checker__px_array_T_3; // @[TLB.scala:533:{89,104}]
wire [13:0] pma_checker_px_array = pma_checker__px_array_T_2 & pma_checker__px_array_T_4; // @[TLB.scala:533:{21,87,89}]
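  // PMA attribute vectors (eff, c/cacheable, ppp, paa, pal, prefetchable): same layout as the
  // permission vectors above, with the two MSBs replicating the pma_checker PMA response and
  // the low 12 bits taken from the cached entries (TLB.scala:535-547).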
wire [1:0] pma_checker__eff_array_T = {2{_pma_checker_pma_io_resp_eff}}; // @[TLB.scala:422:19, :535:27]
wire [1:0] pma_checker_eff_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_eff, _pma_checker_entries_barrier_1_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_eff_array_lo_lo = {pma_checker_eff_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_eff_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_eff, _pma_checker_entries_barrier_4_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_eff_array_lo_hi = {pma_checker_eff_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_eff_array_lo = {pma_checker_eff_array_lo_hi, pma_checker_eff_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_eff_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_eff, _pma_checker_entries_barrier_7_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_eff_array_hi_lo = {pma_checker_eff_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_eff_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_eff, _pma_checker_entries_barrier_10_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_eff_array_hi_hi = {pma_checker_eff_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_eff}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_eff_array_hi = {pma_checker_eff_array_hi_hi, pma_checker_eff_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__eff_array_T_1 = {pma_checker_eff_array_hi, pma_checker_eff_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_eff_array = {pma_checker__eff_array_T, pma_checker__eff_array_T_1}; // @[package.scala:45:27]
wire [1:0] pma_checker__c_array_T = {2{pma_checker_cacheable}}; // @[TLB.scala:425:41, :537:25]
wire [1:0] _GEN_23 = {_pma_checker_entries_barrier_2_io_y_c, _pma_checker_entries_barrier_1_io_y_c}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_c_array_lo_lo_hi; // @[package.scala:45:27]
assign pma_checker_c_array_lo_lo_hi = _GEN_23; // @[package.scala:45:27]
wire [1:0] pma_checker_prefetchable_array_lo_lo_hi; // @[package.scala:45:27]
assign pma_checker_prefetchable_array_lo_lo_hi = _GEN_23; // @[package.scala:45:27]
wire [2:0] pma_checker_c_array_lo_lo = {pma_checker_c_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_c}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_24 = {_pma_checker_entries_barrier_5_io_y_c, _pma_checker_entries_barrier_4_io_y_c}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_c_array_lo_hi_hi; // @[package.scala:45:27]
assign pma_checker_c_array_lo_hi_hi = _GEN_24; // @[package.scala:45:27]
wire [1:0] pma_checker_prefetchable_array_lo_hi_hi; // @[package.scala:45:27]
assign pma_checker_prefetchable_array_lo_hi_hi = _GEN_24; // @[package.scala:45:27]
wire [2:0] pma_checker_c_array_lo_hi = {pma_checker_c_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_c}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_c_array_lo = {pma_checker_c_array_lo_hi, pma_checker_c_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] _GEN_25 = {_pma_checker_entries_barrier_8_io_y_c, _pma_checker_entries_barrier_7_io_y_c}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_c_array_hi_lo_hi; // @[package.scala:45:27]
assign pma_checker_c_array_hi_lo_hi = _GEN_25; // @[package.scala:45:27]
wire [1:0] pma_checker_prefetchable_array_hi_lo_hi; // @[package.scala:45:27]
assign pma_checker_prefetchable_array_hi_lo_hi = _GEN_25; // @[package.scala:45:27]
wire [2:0] pma_checker_c_array_hi_lo = {pma_checker_c_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_c}; // @[package.scala:45:27, :267:25]
wire [1:0] _GEN_26 = {_pma_checker_entries_barrier_11_io_y_c, _pma_checker_entries_barrier_10_io_y_c}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_c_array_hi_hi_hi; // @[package.scala:45:27]
assign pma_checker_c_array_hi_hi_hi = _GEN_26; // @[package.scala:45:27]
wire [1:0] pma_checker_prefetchable_array_hi_hi_hi; // @[package.scala:45:27]
assign pma_checker_prefetchable_array_hi_hi_hi = _GEN_26; // @[package.scala:45:27]
wire [2:0] pma_checker_c_array_hi_hi = {pma_checker_c_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_c}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_c_array_hi = {pma_checker_c_array_hi_hi, pma_checker_c_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__c_array_T_1 = {pma_checker_c_array_hi, pma_checker_c_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_c_array = {pma_checker__c_array_T, pma_checker__c_array_T_1}; // @[package.scala:45:27]
wire [13:0] pma_checker_lrscAllowed = pma_checker_c_array; // @[TLB.scala:537:20, :580:24]
wire [1:0] pma_checker__ppp_array_T = {2{_pma_checker_pma_io_resp_pp}}; // @[TLB.scala:422:19, :539:27]
wire [1:0] pma_checker_ppp_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_ppp, _pma_checker_entries_barrier_1_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ppp_array_lo_lo = {pma_checker_ppp_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ppp_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_ppp, _pma_checker_entries_barrier_4_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ppp_array_lo_hi = {pma_checker_ppp_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_ppp_array_lo = {pma_checker_ppp_array_lo_hi, pma_checker_ppp_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_ppp_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_ppp, _pma_checker_entries_barrier_7_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ppp_array_hi_lo = {pma_checker_ppp_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_ppp_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_ppp, _pma_checker_entries_barrier_10_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_ppp_array_hi_hi = {pma_checker_ppp_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_ppp}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_ppp_array_hi = {pma_checker_ppp_array_hi_hi, pma_checker_ppp_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__ppp_array_T_1 = {pma_checker_ppp_array_hi, pma_checker_ppp_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_ppp_array = {pma_checker__ppp_array_T, pma_checker__ppp_array_T_1}; // @[package.scala:45:27]
wire [1:0] pma_checker__paa_array_T = {2{_pma_checker_pma_io_resp_aa}}; // @[TLB.scala:422:19, :541:27]
wire [1:0] pma_checker_paa_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_paa, _pma_checker_entries_barrier_1_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_paa_array_lo_lo = {pma_checker_paa_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_paa_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_paa, _pma_checker_entries_barrier_4_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_paa_array_lo_hi = {pma_checker_paa_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_paa_array_lo = {pma_checker_paa_array_lo_hi, pma_checker_paa_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_paa_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_paa, _pma_checker_entries_barrier_7_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_paa_array_hi_lo = {pma_checker_paa_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_paa_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_paa, _pma_checker_entries_barrier_10_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_paa_array_hi_hi = {pma_checker_paa_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_paa}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_paa_array_hi = {pma_checker_paa_array_hi_hi, pma_checker_paa_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__paa_array_T_1 = {pma_checker_paa_array_hi, pma_checker_paa_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_paa_array = {pma_checker__paa_array_T, pma_checker__paa_array_T_1}; // @[package.scala:45:27]
wire [13:0] pma_checker_paa_array_if_cached = pma_checker_paa_array; // @[TLB.scala:541:22, :545:39]
wire [1:0] pma_checker__pal_array_T = {2{_pma_checker_pma_io_resp_al}}; // @[TLB.scala:422:19, :543:27]
wire [1:0] pma_checker_pal_array_lo_lo_hi = {_pma_checker_entries_barrier_2_io_y_pal, _pma_checker_entries_barrier_1_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pal_array_lo_lo = {pma_checker_pal_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_pal_array_lo_hi_hi = {_pma_checker_entries_barrier_5_io_y_pal, _pma_checker_entries_barrier_4_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pal_array_lo_hi = {pma_checker_pal_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_pal_array_lo = {pma_checker_pal_array_lo_hi, pma_checker_pal_array_lo_lo}; // @[package.scala:45:27]
wire [1:0] pma_checker_pal_array_hi_lo_hi = {_pma_checker_entries_barrier_8_io_y_pal, _pma_checker_entries_barrier_7_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pal_array_hi_lo = {pma_checker_pal_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [1:0] pma_checker_pal_array_hi_hi_hi = {_pma_checker_entries_barrier_11_io_y_pal, _pma_checker_entries_barrier_10_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_pal_array_hi_hi = {pma_checker_pal_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_pal}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_pal_array_hi = {pma_checker_pal_array_hi_hi, pma_checker_pal_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__pal_array_T_1 = {pma_checker_pal_array_hi, pma_checker_pal_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_pal_array = {pma_checker__pal_array_T, pma_checker__pal_array_T_1}; // @[package.scala:45:27]
wire [13:0] pma_checker_pal_array_if_cached = pma_checker_pal_array; // @[TLB.scala:543:22, :546:39]
wire [13:0] pma_checker_ppp_array_if_cached = pma_checker_ppp_array | pma_checker_c_array; // @[TLB.scala:537:20, :539:22, :544:39]
wire pma_checker__prefetchable_array_T = pma_checker_cacheable & pma_checker_homogeneous; // @[TLBPermissions.scala:101:65]
wire [1:0] pma_checker__prefetchable_array_T_1 = {pma_checker__prefetchable_array_T, 1'h0}; // @[TLB.scala:547:{43,59}]
wire [2:0] pma_checker_prefetchable_array_lo_lo = {pma_checker_prefetchable_array_lo_lo_hi, _pma_checker_entries_barrier_io_y_c}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_prefetchable_array_lo_hi = {pma_checker_prefetchable_array_lo_hi_hi, _pma_checker_entries_barrier_3_io_y_c}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_prefetchable_array_lo = {pma_checker_prefetchable_array_lo_hi, pma_checker_prefetchable_array_lo_lo}; // @[package.scala:45:27]
wire [2:0] pma_checker_prefetchable_array_hi_lo = {pma_checker_prefetchable_array_hi_lo_hi, _pma_checker_entries_barrier_6_io_y_c}; // @[package.scala:45:27, :267:25]
wire [2:0] pma_checker_prefetchable_array_hi_hi = {pma_checker_prefetchable_array_hi_hi_hi, _pma_checker_entries_barrier_9_io_y_c}; // @[package.scala:45:27, :267:25]
wire [5:0] pma_checker_prefetchable_array_hi = {pma_checker_prefetchable_array_hi_hi, pma_checker_prefetchable_array_hi_lo}; // @[package.scala:45:27]
wire [11:0] pma_checker__prefetchable_array_T_2 = {pma_checker_prefetchable_array_hi, pma_checker_prefetchable_array_lo}; // @[package.scala:45:27]
wire [13:0] pma_checker_prefetchable_array = {pma_checker__prefetchable_array_T_1, pma_checker__prefetchable_array_T_2}; // @[package.scala:45:27]
wire [3:0] pma_checker__misaligned_T = 4'h1 << pma_checker_io_req_bits_size; // @[OneHot.scala:58:35]
wire [4:0] pma_checker__misaligned_T_1 = {1'h0, pma_checker__misaligned_T} - 5'h1; // @[OneHot.scala:58:35]
wire [3:0] pma_checker__misaligned_T_2 = pma_checker__misaligned_T_1[3:0]; // @[TLB.scala:550:69]
wire [39:0] pma_checker__misaligned_T_3 = {36'h0, pma_checker_io_req_bits_vaddr[3:0] & pma_checker__misaligned_T_2}; // @[TLB.scala:550:{39,69}]
wire pma_checker_misaligned = |pma_checker__misaligned_T_3; // @[TLB.scala:550:{39,77}]
wire [39:0] pma_checker_bad_va_maskedVAddr = pma_checker_io_req_bits_vaddr & 40'hC000000000; // @[TLB.scala:559:43]
wire pma_checker__bad_va_T_2 = pma_checker_bad_va_maskedVAddr == 40'h0; // @[TLB.scala:559:43, :560:51]
wire pma_checker__bad_va_T_3 = pma_checker_bad_va_maskedVAddr == 40'hC000000000; // @[TLB.scala:559:43, :560:86]
wire pma_checker__bad_va_T_4 = pma_checker__bad_va_T_3; // @[TLB.scala:560:{71,86}]
wire pma_checker__bad_va_T_5 = pma_checker__bad_va_T_2 | pma_checker__bad_va_T_4; // @[TLB.scala:560:{51,59,71}]
wire pma_checker__bad_va_T_6 = ~pma_checker__bad_va_T_5; // @[TLB.scala:560:{37,59}]
wire pma_checker__bad_va_T_7 = pma_checker__bad_va_T_6; // @[TLB.scala:560:{34,37}]
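  // Memory command decode: io_req_bits_cmd is matched against the Consts.scala M_* encodings to
  // derive the cmd_lrsc (LR/SC), cmd_amo_logical, cmd_amo_arithmetic, cmd_put_partial, cmd_read,
  // cmd_write and cmd_write_perms classes used by the permission checks below.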
wire _GEN_27 = pma_checker_io_req_bits_cmd == 5'h6; // @[package.scala:16:47]
wire pma_checker__cmd_lrsc_T; // @[package.scala:16:47]
assign pma_checker__cmd_lrsc_T = _GEN_27; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_2; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_2 = _GEN_27; // @[package.scala:16:47]
wire _GEN_28 = pma_checker_io_req_bits_cmd == 5'h7; // @[package.scala:16:47]
wire pma_checker__cmd_lrsc_T_1; // @[package.scala:16:47]
assign pma_checker__cmd_lrsc_T_1 = _GEN_28; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_3; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_3 = _GEN_28; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_3; // @[Consts.scala:90:66]
assign pma_checker__cmd_write_T_3 = _GEN_28; // @[package.scala:16:47]
wire pma_checker__cmd_lrsc_T_2 = pma_checker__cmd_lrsc_T | pma_checker__cmd_lrsc_T_1; // @[package.scala:16:47, :81:59]
wire _GEN_29 = pma_checker_io_req_bits_cmd == 5'h4; // @[package.scala:16:47]
wire pma_checker__cmd_amo_logical_T; // @[package.scala:16:47]
assign pma_checker__cmd_amo_logical_T = _GEN_29; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_7; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_7 = _GEN_29; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_5; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_5 = _GEN_29; // @[package.scala:16:47]
wire _GEN_30 = pma_checker_io_req_bits_cmd == 5'h9; // @[package.scala:16:47]
wire pma_checker__cmd_amo_logical_T_1; // @[package.scala:16:47]
assign pma_checker__cmd_amo_logical_T_1 = _GEN_30; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_8; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_8 = _GEN_30; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_6; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_6 = _GEN_30; // @[package.scala:16:47]
wire _GEN_31 = pma_checker_io_req_bits_cmd == 5'hA; // @[package.scala:16:47]
wire pma_checker__cmd_amo_logical_T_2; // @[package.scala:16:47]
assign pma_checker__cmd_amo_logical_T_2 = _GEN_31; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_9; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_9 = _GEN_31; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_7; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_7 = _GEN_31; // @[package.scala:16:47]
wire _GEN_32 = pma_checker_io_req_bits_cmd == 5'hB; // @[package.scala:16:47]
wire pma_checker__cmd_amo_logical_T_3; // @[package.scala:16:47]
assign pma_checker__cmd_amo_logical_T_3 = _GEN_32; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_10; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_10 = _GEN_32; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_8; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_8 = _GEN_32; // @[package.scala:16:47]
wire pma_checker__cmd_amo_logical_T_4 = pma_checker__cmd_amo_logical_T | pma_checker__cmd_amo_logical_T_1; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_amo_logical_T_5 = pma_checker__cmd_amo_logical_T_4 | pma_checker__cmd_amo_logical_T_2; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_amo_logical_T_6 = pma_checker__cmd_amo_logical_T_5 | pma_checker__cmd_amo_logical_T_3; // @[package.scala:16:47, :81:59]
wire _GEN_33 = pma_checker_io_req_bits_cmd == 5'h8; // @[package.scala:16:47]
wire pma_checker__cmd_amo_arithmetic_T; // @[package.scala:16:47]
assign pma_checker__cmd_amo_arithmetic_T = _GEN_33; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_14; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_14 = _GEN_33; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_12; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_12 = _GEN_33; // @[package.scala:16:47]
wire _GEN_34 = pma_checker_io_req_bits_cmd == 5'hC; // @[package.scala:16:47]
wire pma_checker__cmd_amo_arithmetic_T_1; // @[package.scala:16:47]
assign pma_checker__cmd_amo_arithmetic_T_1 = _GEN_34; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_15; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_15 = _GEN_34; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_13; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_13 = _GEN_34; // @[package.scala:16:47]
wire _GEN_35 = pma_checker_io_req_bits_cmd == 5'hD; // @[package.scala:16:47]
wire pma_checker__cmd_amo_arithmetic_T_2; // @[package.scala:16:47]
assign pma_checker__cmd_amo_arithmetic_T_2 = _GEN_35; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_16; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_16 = _GEN_35; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_14; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_14 = _GEN_35; // @[package.scala:16:47]
wire _GEN_36 = pma_checker_io_req_bits_cmd == 5'hE; // @[package.scala:16:47]
wire pma_checker__cmd_amo_arithmetic_T_3; // @[package.scala:16:47]
assign pma_checker__cmd_amo_arithmetic_T_3 = _GEN_36; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_17; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_17 = _GEN_36; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_15; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_15 = _GEN_36; // @[package.scala:16:47]
wire _GEN_37 = pma_checker_io_req_bits_cmd == 5'hF; // @[package.scala:16:47]
wire pma_checker__cmd_amo_arithmetic_T_4; // @[package.scala:16:47]
assign pma_checker__cmd_amo_arithmetic_T_4 = _GEN_37; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_18; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_18 = _GEN_37; // @[package.scala:16:47]
wire pma_checker__cmd_write_T_16; // @[package.scala:16:47]
assign pma_checker__cmd_write_T_16 = _GEN_37; // @[package.scala:16:47]
wire pma_checker__cmd_amo_arithmetic_T_5 = pma_checker__cmd_amo_arithmetic_T | pma_checker__cmd_amo_arithmetic_T_1; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_amo_arithmetic_T_6 = pma_checker__cmd_amo_arithmetic_T_5 | pma_checker__cmd_amo_arithmetic_T_2; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_amo_arithmetic_T_7 = pma_checker__cmd_amo_arithmetic_T_6 | pma_checker__cmd_amo_arithmetic_T_3; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_amo_arithmetic_T_8 = pma_checker__cmd_amo_arithmetic_T_7 | pma_checker__cmd_amo_arithmetic_T_4; // @[package.scala:16:47, :81:59]
wire _GEN_38 = pma_checker_io_req_bits_cmd == 5'h11; // @[TLB.scala:573:41]
wire pma_checker_cmd_put_partial; // @[TLB.scala:573:41]
assign pma_checker_cmd_put_partial = _GEN_38; // @[TLB.scala:573:41]
wire pma_checker__cmd_write_T_1; // @[Consts.scala:90:49]
assign pma_checker__cmd_write_T_1 = _GEN_38; // @[TLB.scala:573:41]
wire pma_checker__cmd_read_T = pma_checker_io_req_bits_cmd == 5'h0; // @[package.scala:16:47]
wire _GEN_39 = pma_checker_io_req_bits_cmd == 5'h10; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_1; // @[package.scala:16:47]
assign pma_checker__cmd_read_T_1 = _GEN_39; // @[package.scala:16:47]
wire pma_checker__cmd_readx_T; // @[TLB.scala:575:56]
assign pma_checker__cmd_readx_T = _GEN_39; // @[package.scala:16:47]
wire pma_checker__cmd_read_T_4 = pma_checker__cmd_read_T | pma_checker__cmd_read_T_1; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_5 = pma_checker__cmd_read_T_4 | pma_checker__cmd_read_T_2; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_6 = pma_checker__cmd_read_T_5 | pma_checker__cmd_read_T_3; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_11 = pma_checker__cmd_read_T_7 | pma_checker__cmd_read_T_8; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_12 = pma_checker__cmd_read_T_11 | pma_checker__cmd_read_T_9; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_13 = pma_checker__cmd_read_T_12 | pma_checker__cmd_read_T_10; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_19 = pma_checker__cmd_read_T_14 | pma_checker__cmd_read_T_15; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_20 = pma_checker__cmd_read_T_19 | pma_checker__cmd_read_T_16; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_21 = pma_checker__cmd_read_T_20 | pma_checker__cmd_read_T_17; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_22 = pma_checker__cmd_read_T_21 | pma_checker__cmd_read_T_18; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_read_T_23 = pma_checker__cmd_read_T_13 | pma_checker__cmd_read_T_22; // @[package.scala:81:59]
wire pma_checker_cmd_read = pma_checker__cmd_read_T_6 | pma_checker__cmd_read_T_23; // @[package.scala:81:59]
wire pma_checker__cmd_write_T = pma_checker_io_req_bits_cmd == 5'h1; // @[DCache.scala:120:32]
wire pma_checker__cmd_write_T_2 = pma_checker__cmd_write_T | pma_checker__cmd_write_T_1; // @[Consts.scala:90:{32,42,49}]
wire pma_checker__cmd_write_T_4 = pma_checker__cmd_write_T_2 | pma_checker__cmd_write_T_3; // @[Consts.scala:90:{42,59,66}]
wire pma_checker__cmd_write_T_9 = pma_checker__cmd_write_T_5 | pma_checker__cmd_write_T_6; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_10 = pma_checker__cmd_write_T_9 | pma_checker__cmd_write_T_7; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_11 = pma_checker__cmd_write_T_10 | pma_checker__cmd_write_T_8; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_17 = pma_checker__cmd_write_T_12 | pma_checker__cmd_write_T_13; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_18 = pma_checker__cmd_write_T_17 | pma_checker__cmd_write_T_14; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_19 = pma_checker__cmd_write_T_18 | pma_checker__cmd_write_T_15; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_20 = pma_checker__cmd_write_T_19 | pma_checker__cmd_write_T_16; // @[package.scala:16:47, :81:59]
wire pma_checker__cmd_write_T_21 = pma_checker__cmd_write_T_11 | pma_checker__cmd_write_T_20; // @[package.scala:81:59]
wire pma_checker_cmd_write = pma_checker__cmd_write_T_4 | pma_checker__cmd_write_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire pma_checker__cmd_write_perms_T = pma_checker_io_req_bits_cmd == 5'h5; // @[package.scala:16:47]
wire pma_checker__cmd_write_perms_T_1 = pma_checker_io_req_bits_cmd == 5'h17; // @[package.scala:16:47]
wire pma_checker__cmd_write_perms_T_2 = pma_checker__cmd_write_perms_T | pma_checker__cmd_write_perms_T_1; // @[package.scala:16:47, :81:59]
wire pma_checker_cmd_write_perms = pma_checker_cmd_write | pma_checker__cmd_write_perms_T_2; // @[package.scala:81:59]
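  // Fault arrays and response outputs: ae_* (access exception), pf_* (page fault) and gf_*
  // (guest fault) vectors are formed per access type; the io_resp_* bits then select bit 13 of
  // each vector (mask 14'h2000), which appears to correspond to the special/passthrough entry
  // used by this PMA-checker TLB instance.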
wire [13:0] pma_checker__ae_array_T = pma_checker_misaligned ? pma_checker_eff_array : 14'h0; // @[TLB.scala:535:22, :550:77, :582:8]
wire [13:0] pma_checker_ae_array = pma_checker__ae_array_T; // @[TLB.scala:582:{8,37}]
wire [13:0] pma_checker__ae_array_T_1 = ~pma_checker_lrscAllowed; // @[TLB.scala:580:24, :583:19]
wire [13:0] pma_checker__ae_ld_array_T = ~pma_checker_pr_array; // @[TLB.scala:529:87, :586:46]
wire [13:0] pma_checker__ae_ld_array_T_1 = pma_checker_ae_array | pma_checker__ae_ld_array_T; // @[TLB.scala:582:37, :586:{44,46}]
wire [13:0] pma_checker_ae_ld_array = pma_checker_cmd_read ? pma_checker__ae_ld_array_T_1 : 14'h0; // @[TLB.scala:586:{24,44}]
wire [13:0] pma_checker__ae_st_array_T = ~pma_checker_pw_array; // @[TLB.scala:531:87, :588:37]
wire [13:0] pma_checker__ae_st_array_T_1 = pma_checker_ae_array | pma_checker__ae_st_array_T; // @[TLB.scala:582:37, :588:{35,37}]
wire [13:0] pma_checker__ae_st_array_T_2 = pma_checker_cmd_write_perms ? pma_checker__ae_st_array_T_1 : 14'h0; // @[TLB.scala:577:35, :588:{8,35}]
wire [13:0] pma_checker__ae_st_array_T_3 = ~pma_checker_ppp_array_if_cached; // @[TLB.scala:544:39, :589:26]
wire [13:0] pma_checker__ae_st_array_T_4 = pma_checker_cmd_put_partial ? pma_checker__ae_st_array_T_3 : 14'h0; // @[TLB.scala:573:41, :589:{8,26}]
wire [13:0] pma_checker__ae_st_array_T_5 = pma_checker__ae_st_array_T_2 | pma_checker__ae_st_array_T_4; // @[TLB.scala:588:{8,53}, :589:8]
wire [13:0] pma_checker__ae_st_array_T_8 = pma_checker__ae_st_array_T_5; // @[TLB.scala:588:53, :589:53]
wire [13:0] pma_checker__ae_st_array_T_6 = ~pma_checker_pal_array_if_cached; // @[TLB.scala:546:39, :590:26]
wire [13:0] pma_checker_ae_st_array = pma_checker__ae_st_array_T_8; // @[TLB.scala:589:53, :590:53]
wire [13:0] pma_checker__ae_st_array_T_9 = ~pma_checker_paa_array_if_cached; // @[TLB.scala:545:39, :591:29]
wire [13:0] pma_checker__must_alloc_array_T = ~pma_checker_ppp_array; // @[TLB.scala:539:22, :593:26]
wire [13:0] pma_checker__must_alloc_array_T_1 = pma_checker_cmd_put_partial ? pma_checker__must_alloc_array_T : 14'h0; // @[TLB.scala:573:41, :593:{8,26}]
wire [13:0] pma_checker__must_alloc_array_T_4 = pma_checker__must_alloc_array_T_1; // @[TLB.scala:593:{8,43}]
wire [13:0] pma_checker__must_alloc_array_T_2 = ~pma_checker_pal_array; // @[TLB.scala:543:22, :594:26]
wire [13:0] pma_checker__must_alloc_array_T_7 = pma_checker__must_alloc_array_T_4; // @[TLB.scala:593:43, :594:43]
wire [13:0] pma_checker__must_alloc_array_T_5 = ~pma_checker_paa_array; // @[TLB.scala:541:22, :595:29]
wire [13:0] pma_checker_must_alloc_array = pma_checker__must_alloc_array_T_7; // @[TLB.scala:594:43, :595:46]
wire [13:0] pma_checker__pf_ld_array_T_1 = ~pma_checker__pf_ld_array_T; // @[TLB.scala:597:{37,41}]
wire [13:0] pma_checker__pf_ld_array_T_2 = ~pma_checker_ptw_ae_array; // @[TLB.scala:506:25, :597:73]
wire [13:0] pma_checker__pf_ld_array_T_3 = pma_checker__pf_ld_array_T_1 & pma_checker__pf_ld_array_T_2; // @[TLB.scala:597:{37,71,73}]
wire [13:0] pma_checker__pf_ld_array_T_4 = pma_checker__pf_ld_array_T_3 | pma_checker_ptw_pf_array; // @[TLB.scala:508:25, :597:{71,88}]
wire [13:0] pma_checker__pf_ld_array_T_5 = ~pma_checker_ptw_gf_array; // @[TLB.scala:509:25, :597:106]
wire [13:0] pma_checker__pf_ld_array_T_6 = pma_checker__pf_ld_array_T_4 & pma_checker__pf_ld_array_T_5; // @[TLB.scala:597:{88,104,106}]
wire [13:0] pma_checker_pf_ld_array = pma_checker_cmd_read ? pma_checker__pf_ld_array_T_6 : 14'h0; // @[TLB.scala:597:{24,104}]
wire [13:0] pma_checker__pf_st_array_T = ~pma_checker_w_array; // @[TLB.scala:521:20, :598:44]
wire [13:0] pma_checker__pf_st_array_T_1 = ~pma_checker_ptw_ae_array; // @[TLB.scala:506:25, :597:73, :598:55]
wire [13:0] pma_checker__pf_st_array_T_2 = pma_checker__pf_st_array_T & pma_checker__pf_st_array_T_1; // @[TLB.scala:598:{44,53,55}]
wire [13:0] pma_checker__pf_st_array_T_3 = pma_checker__pf_st_array_T_2 | pma_checker_ptw_pf_array; // @[TLB.scala:508:25, :598:{53,70}]
wire [13:0] pma_checker__pf_st_array_T_4 = ~pma_checker_ptw_gf_array; // @[TLB.scala:509:25, :597:106, :598:88]
wire [13:0] pma_checker__pf_st_array_T_5 = pma_checker__pf_st_array_T_3 & pma_checker__pf_st_array_T_4; // @[TLB.scala:598:{70,86,88}]
wire [13:0] pma_checker_pf_st_array = pma_checker_cmd_write_perms ? pma_checker__pf_st_array_T_5 : 14'h0; // @[TLB.scala:577:35, :598:{24,86}]
wire [13:0] pma_checker__pf_inst_array_T = ~pma_checker_x_array; // @[TLB.scala:522:20, :599:25]
wire [13:0] pma_checker__pf_inst_array_T_1 = ~pma_checker_ptw_ae_array; // @[TLB.scala:506:25, :597:73, :599:36]
wire [13:0] pma_checker__pf_inst_array_T_2 = pma_checker__pf_inst_array_T & pma_checker__pf_inst_array_T_1; // @[TLB.scala:599:{25,34,36}]
wire [13:0] pma_checker__pf_inst_array_T_3 = pma_checker__pf_inst_array_T_2 | pma_checker_ptw_pf_array; // @[TLB.scala:508:25, :599:{34,51}]
wire [13:0] pma_checker__pf_inst_array_T_4 = ~pma_checker_ptw_gf_array; // @[TLB.scala:509:25, :597:106, :599:69]
wire [13:0] pma_checker_pf_inst_array = pma_checker__pf_inst_array_T_3 & pma_checker__pf_inst_array_T_4; // @[TLB.scala:599:{51,67,69}]
wire [13:0] pma_checker__gf_ld_array_T_4 = ~pma_checker_ptw_ae_array; // @[TLB.scala:506:25, :597:73, :600:100]
wire [13:0] pma_checker__gf_ld_array_T_5 = pma_checker__gf_ld_array_T_3 & pma_checker__gf_ld_array_T_4; // @[TLB.scala:600:{82,98,100}]
wire [13:0] pma_checker__gf_st_array_T_3 = ~pma_checker_ptw_ae_array; // @[TLB.scala:506:25, :597:73, :601:81]
wire [13:0] pma_checker__gf_st_array_T_4 = pma_checker__gf_st_array_T_2 & pma_checker__gf_st_array_T_3; // @[TLB.scala:601:{63,79,81}]
wire [13:0] pma_checker__gf_inst_array_T_2 = ~pma_checker_ptw_ae_array; // @[TLB.scala:506:25, :597:73, :602:64]
wire [13:0] pma_checker__gf_inst_array_T_3 = pma_checker__gf_inst_array_T_1 & pma_checker__gf_inst_array_T_2; // @[TLB.scala:602:{46,62,64}]
wire pma_checker__gpa_hits_hit_mask_T = pma_checker_vpn == 27'h0; // @[TLB.scala:335:30, :606:73]
wire [13:0] pma_checker__io_resp_pf_ld_T_1 = pma_checker_pf_ld_array & 14'h2000; // @[TLB.scala:597:24, :633:57]
wire pma_checker__io_resp_pf_ld_T_2 = |pma_checker__io_resp_pf_ld_T_1; // @[TLB.scala:633:{57,65}]
assign pma_checker__io_resp_pf_ld_T_3 = pma_checker__io_resp_pf_ld_T_2; // @[TLB.scala:633:{41,65}]
assign pma_checker_io_resp_pf_ld = pma_checker__io_resp_pf_ld_T_3; // @[TLB.scala:633:41]
wire [13:0] pma_checker__io_resp_pf_st_T_1 = pma_checker_pf_st_array & 14'h2000; // @[TLB.scala:598:24, :634:64]
wire pma_checker__io_resp_pf_st_T_2 = |pma_checker__io_resp_pf_st_T_1; // @[TLB.scala:634:{64,72}]
assign pma_checker__io_resp_pf_st_T_3 = pma_checker__io_resp_pf_st_T_2; // @[TLB.scala:634:{48,72}]
assign pma_checker_io_resp_pf_st = pma_checker__io_resp_pf_st_T_3; // @[TLB.scala:634:48]
wire [13:0] pma_checker__io_resp_pf_inst_T = pma_checker_pf_inst_array & 14'h2000; // @[TLB.scala:599:67, :635:47]
wire pma_checker__io_resp_pf_inst_T_1 = |pma_checker__io_resp_pf_inst_T; // @[TLB.scala:635:{47,55}]
assign pma_checker__io_resp_pf_inst_T_2 = pma_checker__io_resp_pf_inst_T_1; // @[TLB.scala:635:{29,55}]
assign pma_checker_io_resp_pf_inst = pma_checker__io_resp_pf_inst_T_2; // @[TLB.scala:635:29]
wire [13:0] pma_checker__io_resp_ae_ld_T = pma_checker_ae_ld_array & 14'h2000; // @[TLB.scala:586:24, :641:33]
assign pma_checker__io_resp_ae_ld_T_1 = |pma_checker__io_resp_ae_ld_T; // @[TLB.scala:641:{33,41}]
assign pma_checker_io_resp_ae_ld = pma_checker__io_resp_ae_ld_T_1; // @[TLB.scala:641:41]
wire [13:0] pma_checker__io_resp_ae_st_T = pma_checker_ae_st_array & 14'h2000; // @[TLB.scala:590:53, :642:33]
assign pma_checker__io_resp_ae_st_T_1 = |pma_checker__io_resp_ae_st_T; // @[TLB.scala:642:{33,41}]
assign pma_checker_io_resp_ae_st = pma_checker__io_resp_ae_st_T_1; // @[TLB.scala:642:41]
wire [13:0] pma_checker__io_resp_ae_inst_T = ~pma_checker_px_array; // @[TLB.scala:533:87, :643:23]
wire [13:0] pma_checker__io_resp_ae_inst_T_1 = pma_checker__io_resp_ae_inst_T & 14'h2000; // @[TLB.scala:643:{23,33}]
assign pma_checker__io_resp_ae_inst_T_2 = |pma_checker__io_resp_ae_inst_T_1; // @[TLB.scala:643:{33,41}]
assign pma_checker_io_resp_ae_inst = pma_checker__io_resp_ae_inst_T_2; // @[TLB.scala:643:41]
assign pma_checker__io_resp_ma_ld_T = pma_checker_misaligned & pma_checker_cmd_read; // @[TLB.scala:550:77, :645:31]
assign pma_checker_io_resp_ma_ld = pma_checker__io_resp_ma_ld_T; // @[TLB.scala:645:31]
assign pma_checker__io_resp_ma_st_T = pma_checker_misaligned & pma_checker_cmd_write; // @[TLB.scala:550:77, :646:31]
assign pma_checker_io_resp_ma_st = pma_checker__io_resp_ma_st_T; // @[TLB.scala:646:31]
wire [13:0] pma_checker__io_resp_cacheable_T = pma_checker_c_array & 14'h2000; // @[TLB.scala:537:20, :648:33]
assign pma_checker__io_resp_cacheable_T_1 = |pma_checker__io_resp_cacheable_T; // @[TLB.scala:648:{33,41}]
assign pma_checker_io_resp_cacheable = pma_checker__io_resp_cacheable_T_1; // @[TLB.scala:648:41]
wire [13:0] pma_checker__io_resp_must_alloc_T = pma_checker_must_alloc_array & 14'h2000; // @[TLB.scala:595:46, :649:43]
assign pma_checker__io_resp_must_alloc_T_1 = |pma_checker__io_resp_must_alloc_T; // @[TLB.scala:649:{43,51}]
assign pma_checker_io_resp_must_alloc = pma_checker__io_resp_must_alloc_T_1; // @[TLB.scala:649:51]
wire [13:0] pma_checker__io_resp_prefetchable_T = pma_checker_prefetchable_array & 14'h2000; // @[TLB.scala:547:31, :650:47]
wire pma_checker__io_resp_prefetchable_T_1 = |pma_checker__io_resp_prefetchable_T; // @[TLB.scala:650:{47,55}]
assign pma_checker__io_resp_prefetchable_T_2 = pma_checker__io_resp_prefetchable_T_1; // @[TLB.scala:650:{55,59}]
assign pma_checker_io_resp_prefetchable = pma_checker__io_resp_prefetchable_T_2; // @[TLB.scala:650:59]
assign pma_checker__io_resp_paddr_T_1 = {pma_checker_ppn, pma_checker__io_resp_paddr_T}; // @[Mux.scala:30:73]
assign pma_checker_io_resp_paddr = pma_checker__io_resp_paddr_T_1; // @[TLB.scala:652:23]
wire [27:0] pma_checker__io_resp_gpa_page_T_1 = {1'h0, pma_checker_vpn}; // @[TLB.scala:335:30, :657:36]
wire [27:0] pma_checker_io_resp_gpa_page = pma_checker__io_resp_gpa_page_T_1; // @[TLB.scala:657:{19,36}]
wire [11:0] pma_checker_io_resp_gpa_offset = pma_checker__io_resp_gpa_offset_T_1; // @[TLB.scala:658:{21,82}]
assign pma_checker__io_resp_gpa_T = {pma_checker_io_resp_gpa_page, pma_checker_io_resp_gpa_offset}; // @[TLB.scala:657:19, :658:21, :659:8]
assign pma_checker_io_resp_gpa = pma_checker__io_resp_gpa_T; // @[TLB.scala:659:8]
wire pma_checker_ignore_1 = pma_checker__ignore_T_1; // @[TLB.scala:182:{28,34}]
wire pma_checker_ignore_4 = pma_checker__ignore_T_4; // @[TLB.scala:182:{28,34}]
wire pma_checker_ignore_7 = pma_checker__ignore_T_7; // @[TLB.scala:182:{28,34}]
wire pma_checker_ignore_10 = pma_checker__ignore_T_10; // @[TLB.scala:182:{28,34}]
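  // Random replacement: a 16-bit LFSR value is assembled from the PRNG outputs (PRNG.scala) and
  // is used for the 'replace' / victim-way selection (Replacement.scala).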
wire replace; // @[Replacement.scala:37:29]
wire [1:0] lfsr_lo_lo_lo = {_lfsr_prng_io_out_1, _lfsr_prng_io_out_0}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_lo_lo_hi = {_lfsr_prng_io_out_3, _lfsr_prng_io_out_2}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_lo_lo = {lfsr_lo_lo_hi, lfsr_lo_lo_lo}; // @[PRNG.scala:95:17]
wire [1:0] lfsr_lo_hi_lo = {_lfsr_prng_io_out_5, _lfsr_prng_io_out_4}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_lo_hi_hi = {_lfsr_prng_io_out_7, _lfsr_prng_io_out_6}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_lo_hi = {lfsr_lo_hi_hi, lfsr_lo_hi_lo}; // @[PRNG.scala:95:17]
wire [7:0] lfsr_lo = {lfsr_lo_hi, lfsr_lo_lo}; // @[PRNG.scala:95:17]
wire [1:0] lfsr_hi_lo_lo = {_lfsr_prng_io_out_9, _lfsr_prng_io_out_8}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_hi_lo_hi = {_lfsr_prng_io_out_11, _lfsr_prng_io_out_10}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_hi_lo = {lfsr_hi_lo_hi, lfsr_hi_lo_lo}; // @[PRNG.scala:95:17]
wire [1:0] lfsr_hi_hi_lo = {_lfsr_prng_io_out_13, _lfsr_prng_io_out_12}; // @[PRNG.scala:91:22, :95:17]
wire [1:0] lfsr_hi_hi_hi = {_lfsr_prng_io_out_15, _lfsr_prng_io_out_14}; // @[PRNG.scala:91:22, :95:17]
wire [3:0] lfsr_hi_hi = {lfsr_hi_hi_hi, lfsr_hi_hi_lo}; // @[PRNG.scala:95:17]
wire [7:0] lfsr_hi = {lfsr_hi_hi, lfsr_hi_lo}; // @[PRNG.scala:95:17]
wire [15:0] lfsr = {lfsr_hi, lfsr_lo}; // @[PRNG.scala:95:17]
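  // metaArb: fixed-priority arbiter over the D$ tag/metadata array ports (DCache.scala:135).
  // Port 0 has the highest priority and port 7 the lowest; io_chosen and io_out_bits_* are formed
  // by the priority muxes below, and each io_in_N_ready is the NOR of all higher-priority valids.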
wire metaArb__grant_T = metaArb_io_in_0_valid; // @[Arbiter.scala:45:68]
wire [39:0] _metaArb_io_in_5_bits_addr_T_2; // @[DCache.scala:1018:36]
wire [1:0] _metaArb_io_in_5_bits_idx_T; // @[DCache.scala:1017:44]
wire metaArb__io_in_1_ready_T; // @[Arbiter.scala:153:19]
wire [39:0] _metaArb_io_in_1_bits_addr_T_2; // @[DCache.scala:454:36]
wire [1:0] _metaArb_io_in_1_bits_idx_T_2; // @[DCache.scala:453:35]
wire [25:0] _metaArb_io_in_1_bits_data_T; // @[DCache.scala:458:14]
wire metaArb__io_in_2_ready_T; // @[Arbiter.scala:153:19]
wire _metaArb_io_in_2_valid_T; // @[DCache.scala:462:63]
wire [39:0] _metaArb_io_in_2_bits_addr_T_2; // @[DCache.scala:466:36]
wire [1:0] _metaArb_io_in_2_bits_idx_T; // @[DCache.scala:465:40]
wire [3:0] s2_victim_or_hit_way; // @[DCache.scala:432:33]
wire [25:0] _metaArb_io_in_2_bits_data_T_1; // @[DCache.scala:467:97]
wire metaArb__io_in_3_ready_T; // @[Arbiter.scala:153:19]
wire _metaArb_io_in_3_valid_T_2; // @[DCache.scala:741:53]
wire [39:0] _metaArb_io_in_3_bits_addr_T_2; // @[DCache.scala:745:36]
wire [1:0] _metaArb_io_in_3_bits_idx_T; // @[DCache.scala:744:40]
wire [25:0] _metaArb_io_in_3_bits_data_T_18; // @[DCache.scala:746:134]
wire metaArb__io_in_4_ready_T; // @[Arbiter.scala:153:19]
wire _metaArb_io_in_4_valid_T_2; // @[package.scala:81:59]
wire [39:0] _metaArb_io_in_4_bits_addr_T_2; // @[DCache.scala:912:36]
wire [1:0] _metaArb_io_in_4_bits_idx_T; // @[DCache.scala:1200:47]
wire [3:0] releaseWay; // @[DCache.scala:232:24]
wire [25:0] _metaArb_io_in_4_bits_data_T_1; // @[DCache.scala:913:97]
wire metaArb__io_in_5_ready_T; // @[Arbiter.scala:153:19]
wire metaArb__io_in_6_ready_T; // @[Arbiter.scala:153:19]
wire metaArb__io_in_7_ready_T; // @[Arbiter.scala:153:19]
wire [1:0] _metaArb_io_in_7_bits_idx_T; // @[DCache.scala:263:58]
wire metaArb__io_out_valid_T_1; // @[Arbiter.scala:154:31]
wire [1:0] _s1_meta_WIRE = metaArb_io_out_bits_idx; // @[DCache.scala:135:28, :314:35]
wire [39:0] metaArb_io_in_0_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_0_bits_idx; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_in_1_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_1_bits_idx; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_1_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_1_ready; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_in_2_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_2_bits_idx; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_in_2_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_2_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_2_ready; // @[DCache.scala:135:28]
wire metaArb_io_in_2_valid; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_in_3_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_3_bits_idx; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_in_3_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_3_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_3_ready; // @[DCache.scala:135:28]
wire metaArb_io_in_3_valid; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_in_4_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_4_bits_idx; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_in_4_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_4_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_4_ready; // @[DCache.scala:135:28]
wire metaArb_io_in_4_valid; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_in_5_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_5_bits_idx; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_in_5_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_5_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_5_ready; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_in_6_bits_addr; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_6_bits_idx; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_in_6_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_6_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_6_ready; // @[DCache.scala:135:28]
wire metaArb_io_in_6_valid; // @[DCache.scala:135:28]
wire [1:0] metaArb_io_in_7_bits_idx; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_in_7_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_in_7_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_in_7_ready; // @[DCache.scala:135:28]
wire metaArb_io_out_bits_write; // @[DCache.scala:135:28]
wire [39:0] metaArb_io_out_bits_addr; // @[DCache.scala:135:28]
wire [3:0] metaArb_io_out_bits_way_en; // @[DCache.scala:135:28]
wire [25:0] metaArb_io_out_bits_data; // @[DCache.scala:135:28]
wire metaArb_io_out_valid; // @[DCache.scala:135:28]
wire [2:0] metaArb_io_chosen; // @[DCache.scala:135:28]
assign metaArb_io_chosen = metaArb_io_in_0_valid ? 3'h0 : metaArb_io_in_2_valid ? 3'h2 : metaArb_io_in_3_valid ? 3'h3 : metaArb_io_in_4_valid ? 3'h4 : {2'h3, ~metaArb_io_in_6_valid}; // @[Arbiter.scala:142:13, :145:26, :146:17]
assign metaArb_io_out_bits_write = metaArb_io_in_0_valid | metaArb_io_in_2_valid | metaArb_io_in_3_valid | metaArb_io_in_4_valid; // @[Arbiter.scala:145:26, :147:19]
assign metaArb_io_out_bits_addr = metaArb_io_in_0_valid ? metaArb_io_in_0_bits_addr : metaArb_io_in_2_valid ? metaArb_io_in_2_bits_addr : metaArb_io_in_3_valid ? metaArb_io_in_3_bits_addr : metaArb_io_in_4_valid ? metaArb_io_in_4_bits_addr : metaArb_io_in_6_valid ? metaArb_io_in_6_bits_addr : metaArb_io_in_7_bits_addr; // @[Arbiter.scala:143:15, :145:26, :147:19]
assign metaArb_io_out_bits_idx = metaArb_io_in_0_valid ? metaArb_io_in_0_bits_idx : metaArb_io_in_2_valid ? metaArb_io_in_2_bits_idx : metaArb_io_in_3_valid ? metaArb_io_in_3_bits_idx : metaArb_io_in_4_valid ? metaArb_io_in_4_bits_idx : metaArb_io_in_6_valid ? metaArb_io_in_6_bits_idx : metaArb_io_in_7_bits_idx; // @[Arbiter.scala:143:15, :145:26, :147:19]
assign metaArb_io_out_bits_way_en = metaArb_io_in_0_valid ? 4'hF : metaArb_io_in_2_valid ? metaArb_io_in_2_bits_way_en : metaArb_io_in_3_valid ? metaArb_io_in_3_bits_way_en : metaArb_io_in_4_valid ? metaArb_io_in_4_bits_way_en : metaArb_io_in_6_valid ? metaArb_io_in_6_bits_way_en : metaArb_io_in_7_bits_way_en; // @[Arbiter.scala:143:15, :145:26, :147:19]
assign metaArb_io_out_bits_data = metaArb_io_in_0_valid ? 26'h0 : metaArb_io_in_2_valid ? metaArb_io_in_2_bits_data : metaArb_io_in_3_valid ? metaArb_io_in_3_bits_data : metaArb_io_in_4_valid ? metaArb_io_in_4_bits_data : metaArb_io_in_6_valid ? metaArb_io_in_6_bits_data : metaArb_io_in_7_bits_data; // @[Arbiter.scala:143:15, :145:26, :147:19]
wire metaArb__grant_T_1 = metaArb__grant_T | metaArb_io_in_2_valid; // @[Arbiter.scala:45:68]
wire metaArb__grant_T_2 = metaArb__grant_T_1 | metaArb_io_in_3_valid; // @[Arbiter.scala:45:68]
wire metaArb__grant_T_3 = metaArb__grant_T_2 | metaArb_io_in_4_valid; // @[Arbiter.scala:45:68]
wire metaArb__grant_T_4 = metaArb__grant_T_3; // @[Arbiter.scala:45:68]
wire metaArb__grant_T_5 = metaArb__grant_T_4 | metaArb_io_in_6_valid; // @[Arbiter.scala:45:68]
wire metaArb_grant_1 = ~metaArb_io_in_0_valid; // @[Arbiter.scala:45:78]
assign metaArb__io_in_1_ready_T = metaArb_grant_1; // @[Arbiter.scala:45:78, :153:19]
wire metaArb_grant_2 = ~metaArb__grant_T; // @[Arbiter.scala:45:{68,78}]
assign metaArb__io_in_2_ready_T = metaArb_grant_2; // @[Arbiter.scala:45:78, :153:19]
wire metaArb_grant_3 = ~metaArb__grant_T_1; // @[Arbiter.scala:45:{68,78}]
assign metaArb__io_in_3_ready_T = metaArb_grant_3; // @[Arbiter.scala:45:78, :153:19]
wire metaArb_grant_4 = ~metaArb__grant_T_2; // @[Arbiter.scala:45:{68,78}]
assign metaArb__io_in_4_ready_T = metaArb_grant_4; // @[Arbiter.scala:45:78, :153:19]
wire metaArb_grant_5 = ~metaArb__grant_T_3; // @[Arbiter.scala:45:{68,78}]
assign metaArb__io_in_5_ready_T = metaArb_grant_5; // @[Arbiter.scala:45:78, :153:19]
wire metaArb_grant_6 = ~metaArb__grant_T_4; // @[Arbiter.scala:45:{68,78}]
assign metaArb__io_in_6_ready_T = metaArb_grant_6; // @[Arbiter.scala:45:78, :153:19]
wire metaArb_grant_7 = ~metaArb__grant_T_5; // @[Arbiter.scala:45:{68,78}]
assign metaArb__io_in_7_ready_T = metaArb_grant_7; // @[Arbiter.scala:45:78, :153:19]
assign metaArb_io_in_1_ready = metaArb__io_in_1_ready_T; // @[Arbiter.scala:153:19]
assign metaArb_io_in_2_ready = metaArb__io_in_2_ready_T; // @[Arbiter.scala:153:19]
assign metaArb_io_in_3_ready = metaArb__io_in_3_ready_T; // @[Arbiter.scala:153:19]
assign metaArb_io_in_4_ready = metaArb__io_in_4_ready_T; // @[Arbiter.scala:153:19]
assign metaArb_io_in_5_ready = metaArb__io_in_5_ready_T; // @[Arbiter.scala:153:19]
assign metaArb_io_in_6_ready = metaArb__io_in_6_ready_T; // @[Arbiter.scala:153:19]
assign metaArb_io_in_7_ready = metaArb__io_in_7_ready_T; // @[Arbiter.scala:153:19]
wire metaArb__io_out_valid_T = ~metaArb_grant_7; // @[Arbiter.scala:45:78, :154:19]
assign metaArb__io_out_valid_T_1 = metaArb__io_out_valid_T | metaArb_io_in_7_valid; // @[Arbiter.scala:154:{19,31}]
assign metaArb_io_out_valid = metaArb__io_out_valid_T_1; // @[Arbiter.scala:154:31]
wire _s1_meta_T_1; // @[DCache.scala:314:59]
wire wmask_0; // @[DCache.scala:311:74]
wire wmask_1; // @[DCache.scala:311:74]
wire wmask_2; // @[DCache.scala:311:74]
wire wmask_3; // @[DCache.scala:311:74]
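  // s1_meta_uncorrected: the tag SRAM read data (RW0_rdata) is sliced into four 26-bit per-way
  // metadata words.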
wire [25:0] _s1_meta_uncorrected_WIRE = _rerocc_tile_dcache_tag_array_RW0_rdata[25:0]; // @[DescribedSRAM.scala:17:26]
wire [25:0] _s1_meta_uncorrected_WIRE_1 = _rerocc_tile_dcache_tag_array_RW0_rdata[51:26]; // @[DescribedSRAM.scala:17:26]
wire [25:0] _s1_meta_uncorrected_WIRE_2 = _rerocc_tile_dcache_tag_array_RW0_rdata[77:52]; // @[DescribedSRAM.scala:17:26]
wire [25:0] _s1_meta_uncorrected_WIRE_3 = _rerocc_tile_dcache_tag_array_RW0_rdata[103:78]; // @[DescribedSRAM.scala:17:26]
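  // dataArb: fixed-priority arbiter over the four data-array access ports (lower index wins).
  // Only input 0 supplies per-byte ECC and word masks; the other inputs fall back to the full default masks.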
wire _dataArb_io_in_0_valid_T_12; // @[DCache.scala:516:27]
wire pstore_drain; // @[DCache.scala:516:27]
wire [63:0] _dataArb_io_in_0_bits_wdata_T_9; // @[package.scala:45:27]
wire [7:0] _dataArb_io_in_0_bits_eccMask_T_17; // @[package.scala:45:27]
wire [3:0] _dataArb_io_in_0_bits_way_en_T; // @[DCache.scala:550:38]
wire dataArb__io_in_1_ready_T; // @[Arbiter.scala:153:19]
wire [63:0] tl_d_data_encoded; // @[DCache.scala:324:31]
wire dataArb__io_in_2_ready_T; // @[Arbiter.scala:153:19]
wire _dataArb_io_in_2_valid_T_1; // @[DCache.scala:900:41]
wire [7:0] _dataArb_io_in_2_bits_addr_T_4; // @[DCache.scala:903:72]
wire dataArb__io_in_3_ready_T; // @[Arbiter.scala:153:19]
wire dataArb__io_out_valid_T_1; // @[Arbiter.scala:154:31]
wire [7:0] dataArb_io_in_0_bits_addr; // @[DCache.scala:152:28]
wire dataArb_io_in_0_bits_write; // @[DCache.scala:152:28]
wire [63:0] dataArb_io_in_0_bits_wdata; // @[DCache.scala:152:28]
wire dataArb_io_in_0_bits_wordMask; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_in_0_bits_eccMask; // @[DCache.scala:152:28]
wire [3:0] dataArb_io_in_0_bits_way_en; // @[DCache.scala:152:28]
wire dataArb_io_in_0_valid; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_in_1_bits_addr; // @[DCache.scala:152:28]
wire dataArb_io_in_1_bits_write; // @[DCache.scala:152:28]
wire [63:0] dataArb_io_in_1_bits_wdata; // @[DCache.scala:152:28]
wire [3:0] dataArb_io_in_1_bits_way_en; // @[DCache.scala:152:28]
wire dataArb_io_in_1_ready; // @[DCache.scala:152:28]
wire dataArb_io_in_1_valid; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_in_2_bits_addr; // @[DCache.scala:152:28]
wire [63:0] dataArb_io_in_2_bits_wdata; // @[DCache.scala:152:28]
wire dataArb_io_in_2_ready; // @[DCache.scala:152:28]
wire dataArb_io_in_2_valid; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_in_3_bits_addr; // @[DCache.scala:152:28]
wire [63:0] dataArb_io_in_3_bits_wdata; // @[DCache.scala:152:28]
wire dataArb_io_in_3_ready; // @[DCache.scala:152:28]
wire dataArb_io_in_3_valid; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_out_bits_addr; // @[DCache.scala:152:28]
wire dataArb_io_out_bits_write; // @[DCache.scala:152:28]
wire [63:0] dataArb_io_out_bits_wdata; // @[DCache.scala:152:28]
wire dataArb_io_out_bits_wordMask; // @[DCache.scala:152:28]
wire [7:0] dataArb_io_out_bits_eccMask; // @[DCache.scala:152:28]
wire [3:0] dataArb_io_out_bits_way_en; // @[DCache.scala:152:28]
wire dataArb_io_out_valid; // @[DCache.scala:152:28]
wire [1:0] dataArb_io_chosen; // @[DCache.scala:152:28]
assign dataArb_io_chosen = dataArb_io_in_0_valid ? 2'h0 : dataArb_io_in_1_valid ? 2'h1 : {1'h1, ~dataArb_io_in_2_valid}; // @[Arbiter.scala:142:13, :145:26, :146:17]
assign dataArb_io_out_bits_addr = dataArb_io_in_0_valid ? dataArb_io_in_0_bits_addr : dataArb_io_in_1_valid ? dataArb_io_in_1_bits_addr : dataArb_io_in_2_valid ? dataArb_io_in_2_bits_addr : dataArb_io_in_3_bits_addr; // @[Arbiter.scala:143:15, :145:26, :147:19]
assign dataArb_io_out_bits_write = dataArb_io_in_0_valid ? dataArb_io_in_0_bits_write : dataArb_io_in_1_valid & dataArb_io_in_1_bits_write; // @[Arbiter.scala:145:26, :147:19]
assign dataArb_io_out_bits_wdata = dataArb_io_in_0_valid ? dataArb_io_in_0_bits_wdata : dataArb_io_in_1_valid ? dataArb_io_in_1_bits_wdata : dataArb_io_in_2_valid ? dataArb_io_in_2_bits_wdata : dataArb_io_in_3_bits_wdata; // @[Arbiter.scala:143:15, :145:26, :147:19]
assign dataArb_io_out_bits_wordMask = ~dataArb_io_in_0_valid | dataArb_io_in_0_bits_wordMask; // @[Arbiter.scala:145:26, :147:19]
assign dataArb_io_out_bits_eccMask = dataArb_io_in_0_valid ? dataArb_io_in_0_bits_eccMask : 8'hFF; // @[Arbiter.scala:145:26, :147:19]
assign dataArb_io_out_bits_way_en = dataArb_io_in_0_valid ? dataArb_io_in_0_bits_way_en : dataArb_io_in_1_valid ? dataArb_io_in_1_bits_way_en : 4'hF; // @[Arbiter.scala:145:26, :147:19]
wire dataArb__grant_T = dataArb_io_in_0_valid | dataArb_io_in_1_valid; // @[Arbiter.scala:45:68]
wire dataArb__grant_T_1 = dataArb__grant_T | dataArb_io_in_2_valid; // @[Arbiter.scala:45:68]
wire dataArb_grant_1 = ~dataArb_io_in_0_valid; // @[Arbiter.scala:45:78]
assign dataArb__io_in_1_ready_T = dataArb_grant_1; // @[Arbiter.scala:45:78, :153:19]
wire dataArb_grant_2 = ~dataArb__grant_T; // @[Arbiter.scala:45:{68,78}]
assign dataArb__io_in_2_ready_T = dataArb_grant_2; // @[Arbiter.scala:45:78, :153:19]
wire dataArb_grant_3 = ~dataArb__grant_T_1; // @[Arbiter.scala:45:{68,78}]
assign dataArb__io_in_3_ready_T = dataArb_grant_3; // @[Arbiter.scala:45:78, :153:19]
assign dataArb_io_in_1_ready = dataArb__io_in_1_ready_T; // @[Arbiter.scala:153:19]
assign dataArb_io_in_2_ready = dataArb__io_in_2_ready_T; // @[Arbiter.scala:153:19]
assign dataArb_io_in_3_ready = dataArb__io_in_3_ready_T; // @[Arbiter.scala:153:19]
wire dataArb__io_out_valid_T = ~dataArb_grant_3; // @[Arbiter.scala:45:78, :154:19]
assign dataArb__io_out_valid_T_1 = dataArb__io_out_valid_T | dataArb_io_in_3_valid; // @[Arbiter.scala:154:{19,31}]
assign dataArb_io_out_valid = dataArb__io_out_valid_T_1; // @[Arbiter.scala:154:31]
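  // Forward the TileLink A-channel request: tl_out_a is passed straight through to nodeOut_a.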
wire _tl_out_a_valid_T_14; // @[DCache.scala:603:37]
assign nodeOut_a_deq_valid = tl_out_a_valid; // @[Decoupled.scala:356:21]
wire [2:0] _tl_out_a_bits_T_9_opcode; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_opcode = tl_out_a_bits_opcode; // @[Decoupled.scala:356:21]
wire [2:0] _tl_out_a_bits_T_9_param; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_param = tl_out_a_bits_param; // @[Decoupled.scala:356:21]
wire [3:0] _tl_out_a_bits_T_9_size; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_size = tl_out_a_bits_size; // @[Decoupled.scala:356:21]
wire _tl_out_a_bits_T_9_source; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_source = tl_out_a_bits_source; // @[Decoupled.scala:356:21]
wire [31:0] _tl_out_a_bits_T_9_address; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_address = tl_out_a_bits_address; // @[Decoupled.scala:356:21]
wire [7:0] _tl_out_a_bits_T_9_mask; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_mask = tl_out_a_bits_mask; // @[Decoupled.scala:356:21]
wire [63:0] _tl_out_a_bits_T_9_data; // @[DCache.scala:608:23]
assign nodeOut_a_deq_bits_data = tl_out_a_bits_data; // @[Decoupled.scala:356:21]
wire tl_out_a_ready; // @[DCache.scala:159:22]
assign tl_out_a_ready = nodeOut_a_deq_ready; // @[Decoupled.scala:356:21]
assign nodeOut_a_valid = nodeOut_a_deq_valid; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_opcode = nodeOut_a_deq_bits_opcode; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_param = nodeOut_a_deq_bits_param; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_size = nodeOut_a_deq_bits_size; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_source = nodeOut_a_deq_bits_source; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_address = nodeOut_a_deq_bits_address; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_mask = nodeOut_a_deq_bits_mask; // @[Decoupled.scala:356:21]
assign nodeOut_a_bits_data = nodeOut_a_deq_bits_data; // @[Decoupled.scala:356:21]
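  // S1 pipeline state: request/probe valid tracking plus the latched B-channel probe fields,
  // which seed the nack/clean/dirty release (C-channel) messages.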
wire _s1_valid_T = io_cpu_req_ready_0 & io_cpu_req_valid_0; // @[Decoupled.scala:51:35]
reg s1_valid; // @[DCache.scala:182:25]
wire _GEN_40 = nodeOut_b_ready & nodeOut_b_valid; // @[Decoupled.scala:51:35]
wire _s1_probe_T; // @[Decoupled.scala:51:35]
assign _s1_probe_T = _GEN_40; // @[Decoupled.scala:51:35]
wire _probe_bits_T; // @[Decoupled.scala:51:35]
assign _probe_bits_T = _GEN_40; // @[Decoupled.scala:51:35]
reg s1_probe; // @[DCache.scala:183:25]
reg [2:0] probe_bits_opcode; // @[DCache.scala:184:29]
reg [1:0] probe_bits_param; // @[DCache.scala:184:29]
reg [3:0] probe_bits_size; // @[DCache.scala:184:29]
wire [3:0] nackResponseMessage_size = probe_bits_size; // @[Edges.scala:416:17]
wire [3:0] cleanReleaseMessage_size = probe_bits_size; // @[Edges.scala:416:17]
wire [3:0] dirtyReleaseMessage_size = probe_bits_size; // @[Edges.scala:433:17]
reg probe_bits_source; // @[DCache.scala:184:29]
assign nodeOut_c_bits_source = probe_bits_source; // @[DCache.scala:184:29]
wire nackResponseMessage_source = probe_bits_source; // @[Edges.scala:416:17]
wire cleanReleaseMessage_source = probe_bits_source; // @[Edges.scala:416:17]
wire dirtyReleaseMessage_source = probe_bits_source; // @[Edges.scala:433:17]
reg [31:0] probe_bits_address; // @[DCache.scala:184:29]
assign nodeOut_c_bits_address = probe_bits_address; // @[DCache.scala:184:29]
wire [31:0] nackResponseMessage_address = probe_bits_address; // @[Edges.scala:416:17]
wire [31:0] cleanReleaseMessage_address = probe_bits_address; // @[Edges.scala:416:17]
wire [31:0] dirtyReleaseMessage_address = probe_bits_address; // @[Edges.scala:433:17]
reg [7:0] probe_bits_mask; // @[DCache.scala:184:29]
reg [63:0] probe_bits_data; // @[DCache.scala:184:29]
reg probe_bits_corrupt; // @[DCache.scala:184:29]
wire s1_nack; // @[DCache.scala:185:28]
wire _s1_valid_masked_T = ~io_cpu_s1_kill_0; // @[DCache.scala:101:7, :186:37]
wire s1_valid_masked = s1_valid & _s1_valid_masked_T; // @[DCache.scala:182:25, :186:{34,37}]
wire _s1_valid_not_nacked_T = ~s1_nack; // @[DCache.scala:185:28, :187:41]
wire s1_valid_not_nacked = s1_valid & _s1_valid_not_nacked_T; // @[DCache.scala:182:25, :187:{38,41}]
wire _s0_clk_en_T = ~metaArb_io_out_bits_write; // @[DCache.scala:135:28, :190:43]
wire s0_clk_en = metaArb_io_out_valid & _s0_clk_en_T; // @[DCache.scala:135:28, :190:{40,43}]
wire _s1_tlb_req_T = s0_clk_en; // @[DCache.scala:190:40, :208:52]
wire [39:0] _s0_req_addr_T_2; // @[DCache.scala:193:21]
wire [39:0] s0_tlb_req_vaddr = s0_req_addr; // @[DCache.scala:192:24, :199:28]
wire [1:0] s0_tlb_req_prv = s0_req_dprv; // @[DCache.scala:192:24, :199:28]
wire s0_tlb_req_v = s0_req_dv; // @[DCache.scala:192:24, :199:28]
wire s0_tlb_req_passthrough = s0_req_phys; // @[DCache.scala:192:24, :199:28]
wire [33:0] _s0_req_addr_T = metaArb_io_out_bits_addr[39:6]; // @[DCache.scala:135:28, :193:47]
wire [5:0] _s0_req_addr_T_1 = io_cpu_req_bits_addr_0[5:0]; // @[DCache.scala:101:7, :193:84]
assign _s0_req_addr_T_2 = {_s0_req_addr_T, _s0_req_addr_T_1}; // @[DCache.scala:193:{21,47,84}]
assign s0_req_addr = _s0_req_addr_T_2; // @[DCache.scala:192:24, :193:21]
assign s0_req_phys = ~metaArb_io_in_7_ready | io_cpu_req_bits_phys_0; // @[DCache.scala:101:7, :135:28, :192:24, :195:{9,34,48}]
reg [39:0] s1_req_addr; // @[DCache.scala:196:25]
assign pma_checker_io_req_bits_vaddr = s1_req_addr; // @[DCache.scala:120:32, :196:25]
reg [7:0] s1_req_tag; // @[DCache.scala:196:25]
reg [4:0] s1_req_cmd; // @[DCache.scala:196:25]
assign pma_checker_io_req_bits_cmd = s1_req_cmd; // @[DCache.scala:120:32, :196:25]
reg [1:0] s1_req_size; // @[DCache.scala:196:25]
assign pma_checker_io_req_bits_size = s1_req_size; // @[DCache.scala:120:32, :196:25]
wire [1:0] s1_mask_xwr_size = s1_req_size; // @[DCache.scala:196:25]
reg s1_req_signed; // @[DCache.scala:196:25]
reg [1:0] s1_req_dprv; // @[DCache.scala:196:25]
assign pma_checker_io_req_bits_prv = s1_req_dprv; // @[DCache.scala:120:32, :196:25]
reg s1_req_dv; // @[DCache.scala:196:25]
assign pma_checker_io_req_bits_v = s1_req_dv; // @[DCache.scala:120:32, :196:25]
reg s1_req_phys; // @[DCache.scala:196:25]
reg s1_req_no_resp; // @[DCache.scala:196:25]
reg s1_req_no_alloc; // @[DCache.scala:196:25]
reg s1_req_no_xcpt; // @[DCache.scala:196:25]
reg [63:0] s1_req_data; // @[DCache.scala:196:25]
reg [7:0] s1_req_mask; // @[DCache.scala:196:25]
wire [31:0] _s1_vaddr_T = s1_req_addr[39:8]; // @[DCache.scala:196:25, :197:56]
wire [7:0] _s1_vaddr_T_1 = s1_req_addr[7:0]; // @[DCache.scala:196:25, :197:78]
wire [39:0] s1_vaddr = {_s1_vaddr_T, _s1_vaddr_T_1}; // @[DCache.scala:197:{21,56,78}]
reg [39:0] s1_tlb_req_vaddr; // @[DCache.scala:208:29]
reg s1_tlb_req_passthrough; // @[DCache.scala:208:29]
reg [1:0] s1_tlb_req_size; // @[DCache.scala:208:29]
reg [4:0] s1_tlb_req_cmd; // @[DCache.scala:208:29]
reg [1:0] s1_tlb_req_prv; // @[DCache.scala:208:29]
reg s1_tlb_req_v; // @[DCache.scala:208:29]
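  // Decode s1_req_cmd into read/write classes; the comparators are shared (via _GEN_* nets)
  // with the pstore read-modify-write and load-then-load performance checks.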
wire _GEN_41 = s1_req_cmd == 5'h0; // @[package.scala:16:47]
wire _s1_read_T; // @[package.scala:16:47]
assign _s1_read_T = _GEN_41; // @[package.scala:16:47]
wire _pstore1_rmw_T; // @[package.scala:16:47]
assign _pstore1_rmw_T = _GEN_41; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_1; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_1 = _GEN_41; // @[package.scala:16:47]
wire _GEN_42 = s1_req_cmd == 5'h10; // @[package.scala:16:47]
wire _s1_read_T_1; // @[package.scala:16:47]
assign _s1_read_T_1 = _GEN_42; // @[package.scala:16:47]
wire _pstore1_rmw_T_1; // @[package.scala:16:47]
assign _pstore1_rmw_T_1 = _GEN_42; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_2; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_2 = _GEN_42; // @[package.scala:16:47]
wire _GEN_43 = s1_req_cmd == 5'h6; // @[package.scala:16:47]
wire _s1_read_T_2; // @[package.scala:16:47]
assign _s1_read_T_2 = _GEN_43; // @[package.scala:16:47]
wire _pstore1_rmw_T_2; // @[package.scala:16:47]
assign _pstore1_rmw_T_2 = _GEN_43; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_3; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_3 = _GEN_43; // @[package.scala:16:47]
wire _GEN_44 = s1_req_cmd == 5'h7; // @[package.scala:16:47]
wire _s1_read_T_3; // @[package.scala:16:47]
assign _s1_read_T_3 = _GEN_44; // @[package.scala:16:47]
wire _s1_write_T_3; // @[Consts.scala:90:66]
assign _s1_write_T_3 = _GEN_44; // @[package.scala:16:47]
wire _pstore1_rmw_T_3; // @[package.scala:16:47]
assign _pstore1_rmw_T_3 = _GEN_44; // @[package.scala:16:47]
wire _pstore1_rmw_T_28; // @[Consts.scala:90:66]
assign _pstore1_rmw_T_28 = _GEN_44; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_4; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_4 = _GEN_44; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_29; // @[Consts.scala:90:66]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_29 = _GEN_44; // @[package.scala:16:47]
wire _s1_read_T_4 = _s1_read_T | _s1_read_T_1; // @[package.scala:16:47, :81:59]
wire _s1_read_T_5 = _s1_read_T_4 | _s1_read_T_2; // @[package.scala:16:47, :81:59]
wire _s1_read_T_6 = _s1_read_T_5 | _s1_read_T_3; // @[package.scala:16:47, :81:59]
wire _GEN_45 = s1_req_cmd == 5'h4; // @[package.scala:16:47]
wire _s1_read_T_7; // @[package.scala:16:47]
assign _s1_read_T_7 = _GEN_45; // @[package.scala:16:47]
wire _s1_write_T_5; // @[package.scala:16:47]
assign _s1_write_T_5 = _GEN_45; // @[package.scala:16:47]
wire _pstore1_rmw_T_7; // @[package.scala:16:47]
assign _pstore1_rmw_T_7 = _GEN_45; // @[package.scala:16:47]
wire _pstore1_rmw_T_30; // @[package.scala:16:47]
assign _pstore1_rmw_T_30 = _GEN_45; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_8; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_8 = _GEN_45; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_31; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_31 = _GEN_45; // @[package.scala:16:47]
wire _GEN_46 = s1_req_cmd == 5'h9; // @[package.scala:16:47]
wire _s1_read_T_8; // @[package.scala:16:47]
assign _s1_read_T_8 = _GEN_46; // @[package.scala:16:47]
wire _s1_write_T_6; // @[package.scala:16:47]
assign _s1_write_T_6 = _GEN_46; // @[package.scala:16:47]
wire _pstore1_rmw_T_8; // @[package.scala:16:47]
assign _pstore1_rmw_T_8 = _GEN_46; // @[package.scala:16:47]
wire _pstore1_rmw_T_31; // @[package.scala:16:47]
assign _pstore1_rmw_T_31 = _GEN_46; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_9; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_9 = _GEN_46; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_32; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_32 = _GEN_46; // @[package.scala:16:47]
wire _GEN_47 = s1_req_cmd == 5'hA; // @[package.scala:16:47]
wire _s1_read_T_9; // @[package.scala:16:47]
assign _s1_read_T_9 = _GEN_47; // @[package.scala:16:47]
wire _s1_write_T_7; // @[package.scala:16:47]
assign _s1_write_T_7 = _GEN_47; // @[package.scala:16:47]
wire _pstore1_rmw_T_9; // @[package.scala:16:47]
assign _pstore1_rmw_T_9 = _GEN_47; // @[package.scala:16:47]
wire _pstore1_rmw_T_32; // @[package.scala:16:47]
assign _pstore1_rmw_T_32 = _GEN_47; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_10; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_10 = _GEN_47; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_33; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_33 = _GEN_47; // @[package.scala:16:47]
wire _GEN_48 = s1_req_cmd == 5'hB; // @[package.scala:16:47]
wire _s1_read_T_10; // @[package.scala:16:47]
assign _s1_read_T_10 = _GEN_48; // @[package.scala:16:47]
wire _s1_write_T_8; // @[package.scala:16:47]
assign _s1_write_T_8 = _GEN_48; // @[package.scala:16:47]
wire _pstore1_rmw_T_10; // @[package.scala:16:47]
assign _pstore1_rmw_T_10 = _GEN_48; // @[package.scala:16:47]
wire _pstore1_rmw_T_33; // @[package.scala:16:47]
assign _pstore1_rmw_T_33 = _GEN_48; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_11; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_11 = _GEN_48; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_34; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_34 = _GEN_48; // @[package.scala:16:47]
wire _s1_read_T_11 = _s1_read_T_7 | _s1_read_T_8; // @[package.scala:16:47, :81:59]
wire _s1_read_T_12 = _s1_read_T_11 | _s1_read_T_9; // @[package.scala:16:47, :81:59]
wire _s1_read_T_13 = _s1_read_T_12 | _s1_read_T_10; // @[package.scala:16:47, :81:59]
wire _GEN_49 = s1_req_cmd == 5'h8; // @[package.scala:16:47]
wire _s1_read_T_14; // @[package.scala:16:47]
assign _s1_read_T_14 = _GEN_49; // @[package.scala:16:47]
wire _s1_write_T_12; // @[package.scala:16:47]
assign _s1_write_T_12 = _GEN_49; // @[package.scala:16:47]
wire _pstore1_rmw_T_14; // @[package.scala:16:47]
assign _pstore1_rmw_T_14 = _GEN_49; // @[package.scala:16:47]
wire _pstore1_rmw_T_37; // @[package.scala:16:47]
assign _pstore1_rmw_T_37 = _GEN_49; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_15; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_15 = _GEN_49; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_38; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_38 = _GEN_49; // @[package.scala:16:47]
wire _GEN_50 = s1_req_cmd == 5'hC; // @[package.scala:16:47]
wire _s1_read_T_15; // @[package.scala:16:47]
assign _s1_read_T_15 = _GEN_50; // @[package.scala:16:47]
wire _s1_write_T_13; // @[package.scala:16:47]
assign _s1_write_T_13 = _GEN_50; // @[package.scala:16:47]
wire _pstore1_rmw_T_15; // @[package.scala:16:47]
assign _pstore1_rmw_T_15 = _GEN_50; // @[package.scala:16:47]
wire _pstore1_rmw_T_38; // @[package.scala:16:47]
assign _pstore1_rmw_T_38 = _GEN_50; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_16; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_16 = _GEN_50; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_39; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_39 = _GEN_50; // @[package.scala:16:47]
wire _GEN_51 = s1_req_cmd == 5'hD; // @[package.scala:16:47]
wire _s1_read_T_16; // @[package.scala:16:47]
assign _s1_read_T_16 = _GEN_51; // @[package.scala:16:47]
wire _s1_write_T_14; // @[package.scala:16:47]
assign _s1_write_T_14 = _GEN_51; // @[package.scala:16:47]
wire _pstore1_rmw_T_16; // @[package.scala:16:47]
assign _pstore1_rmw_T_16 = _GEN_51; // @[package.scala:16:47]
wire _pstore1_rmw_T_39; // @[package.scala:16:47]
assign _pstore1_rmw_T_39 = _GEN_51; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_17; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_17 = _GEN_51; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_40; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_40 = _GEN_51; // @[package.scala:16:47]
wire _GEN_52 = s1_req_cmd == 5'hE; // @[package.scala:16:47]
wire _s1_read_T_17; // @[package.scala:16:47]
assign _s1_read_T_17 = _GEN_52; // @[package.scala:16:47]
wire _s1_write_T_15; // @[package.scala:16:47]
assign _s1_write_T_15 = _GEN_52; // @[package.scala:16:47]
wire _pstore1_rmw_T_17; // @[package.scala:16:47]
assign _pstore1_rmw_T_17 = _GEN_52; // @[package.scala:16:47]
wire _pstore1_rmw_T_40; // @[package.scala:16:47]
assign _pstore1_rmw_T_40 = _GEN_52; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_18; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_18 = _GEN_52; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_41; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_41 = _GEN_52; // @[package.scala:16:47]
wire _GEN_53 = s1_req_cmd == 5'hF; // @[package.scala:16:47]
wire _s1_read_T_18; // @[package.scala:16:47]
assign _s1_read_T_18 = _GEN_53; // @[package.scala:16:47]
wire _s1_write_T_16; // @[package.scala:16:47]
assign _s1_write_T_16 = _GEN_53; // @[package.scala:16:47]
wire _pstore1_rmw_T_18; // @[package.scala:16:47]
assign _pstore1_rmw_T_18 = _GEN_53; // @[package.scala:16:47]
wire _pstore1_rmw_T_41; // @[package.scala:16:47]
assign _pstore1_rmw_T_41 = _GEN_53; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_19; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_19 = _GEN_53; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_42; // @[package.scala:16:47]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_42 = _GEN_53; // @[package.scala:16:47]
wire _s1_read_T_19 = _s1_read_T_14 | _s1_read_T_15; // @[package.scala:16:47, :81:59]
wire _s1_read_T_20 = _s1_read_T_19 | _s1_read_T_16; // @[package.scala:16:47, :81:59]
wire _s1_read_T_21 = _s1_read_T_20 | _s1_read_T_17; // @[package.scala:16:47, :81:59]
wire _s1_read_T_22 = _s1_read_T_21 | _s1_read_T_18; // @[package.scala:16:47, :81:59]
wire _s1_read_T_23 = _s1_read_T_13 | _s1_read_T_22; // @[package.scala:81:59]
wire s1_read = _s1_read_T_6 | _s1_read_T_23; // @[package.scala:81:59]
wire _GEN_54 = s1_req_cmd == 5'h1; // @[DCache.scala:196:25]
wire _s1_write_T; // @[Consts.scala:90:32]
assign _s1_write_T = _GEN_54; // @[Consts.scala:90:32]
wire _pstore1_rmw_T_25; // @[Consts.scala:90:32]
assign _pstore1_rmw_T_25 = _GEN_54; // @[Consts.scala:90:32]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_26; // @[Consts.scala:90:32]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_26 = _GEN_54; // @[Consts.scala:90:32]
wire _T_20 = s1_req_cmd == 5'h11; // @[DCache.scala:196:25]
wire _s1_write_T_1; // @[Consts.scala:90:49]
assign _s1_write_T_1 = _T_20; // @[Consts.scala:90:49]
wire _s1_mask_T; // @[DCache.scala:327:32]
assign _s1_mask_T = _T_20; // @[DCache.scala:327:32]
wire _pstore1_rmw_T_26; // @[Consts.scala:90:49]
assign _pstore1_rmw_T_26 = _T_20; // @[Consts.scala:90:49]
wire _pstore1_rmw_T_48; // @[DCache.scala:1191:35]
assign _pstore1_rmw_T_48 = _T_20; // @[DCache.scala:1191:35]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_27; // @[Consts.scala:90:49]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_27 = _T_20; // @[Consts.scala:90:49]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_49; // @[DCache.scala:1191:35]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_49 = _T_20; // @[DCache.scala:1191:35]
wire _s1_write_T_2 = _s1_write_T | _s1_write_T_1; // @[Consts.scala:90:{32,42,49}]
wire _s1_write_T_4 = _s1_write_T_2 | _s1_write_T_3; // @[Consts.scala:90:{42,59,66}]
wire _s1_write_T_9 = _s1_write_T_5 | _s1_write_T_6; // @[package.scala:16:47, :81:59]
wire _s1_write_T_10 = _s1_write_T_9 | _s1_write_T_7; // @[package.scala:16:47, :81:59]
wire _s1_write_T_11 = _s1_write_T_10 | _s1_write_T_8; // @[package.scala:16:47, :81:59]
wire _s1_write_T_17 = _s1_write_T_12 | _s1_write_T_13; // @[package.scala:16:47, :81:59]
wire _s1_write_T_18 = _s1_write_T_17 | _s1_write_T_14; // @[package.scala:16:47, :81:59]
wire _s1_write_T_19 = _s1_write_T_18 | _s1_write_T_15; // @[package.scala:16:47, :81:59]
wire _s1_write_T_20 = _s1_write_T_19 | _s1_write_T_16; // @[package.scala:16:47, :81:59]
wire _s1_write_T_21 = _s1_write_T_11 | _s1_write_T_20; // @[package.scala:81:59]
wire s1_write = _s1_write_T_4 | _s1_write_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire s1_readwrite = s1_read | s1_write; // @[DCache.scala:212:30]
wire _s1_sfence_T = s1_req_cmd == 5'h14; // @[DCache.scala:196:25, :213:30]
wire _GEN_55 = s1_req_cmd == 5'h15; // @[DCache.scala:196:25, :213:57]
wire _s1_sfence_T_1; // @[DCache.scala:213:57]
assign _s1_sfence_T_1 = _GEN_55; // @[DCache.scala:213:57]
wire _tlb_io_sfence_bits_hv_T; // @[DCache.scala:283:39]
assign _tlb_io_sfence_bits_hv_T = _GEN_55; // @[DCache.scala:213:57, :283:39]
wire _s1_sfence_T_2 = _s1_sfence_T | _s1_sfence_T_1; // @[DCache.scala:213:{30,43,57}]
wire _GEN_56 = s1_req_cmd == 5'h16; // @[DCache.scala:196:25, :213:85]
wire _s1_sfence_T_3; // @[DCache.scala:213:85]
assign _s1_sfence_T_3 = _GEN_56; // @[DCache.scala:213:85]
wire _tlb_io_sfence_bits_hg_T; // @[DCache.scala:284:39]
assign _tlb_io_sfence_bits_hg_T = _GEN_56; // @[DCache.scala:213:85, :284:39]
wire s1_sfence = _s1_sfence_T_2 | _s1_sfence_T_3; // @[DCache.scala:213:{43,71,85}]
wire _s1_flush_line_T = s1_req_cmd == 5'h5; // @[DCache.scala:196:25, :214:34]
wire _s1_flush_line_T_1 = s1_req_size[0]; // @[DCache.scala:196:25, :214:64]
wire _tlb_io_sfence_bits_rs1_T = s1_req_size[0]; // @[DCache.scala:196:25, :214:64, :279:40]
wire s1_flush_line = _s1_flush_line_T & _s1_flush_line_T_1; // @[DCache.scala:214:{34,50,64}]
reg s1_flush_valid; // @[DCache.scala:215:27]
reg cached_grant_wait; // @[DCache.scala:223:34]
reg resetting; // @[DCache.scala:224:26]
assign metaArb_io_in_0_valid = resetting; // @[DCache.scala:135:28, :224:26]
reg [3:0] flushCounter; // @[DCache.scala:225:29]
reg release_ack_wait; // @[DCache.scala:226:33]
reg [31:0] release_ack_addr; // @[DCache.scala:227:29]
reg [3:0] release_state; // @[DCache.scala:228:30]
reg [3:0] refill_way; // @[DCache.scala:229:23]
assign metaArb_io_in_3_bits_way_en = refill_way; // @[DCache.scala:135:28, :229:23]
assign dataArb_io_in_1_bits_way_en = refill_way; // @[DCache.scala:152:28, :229:23]
wire _any_pstore_valid_T; // @[DCache.scala:508:36]
wire any_pstore_valid; // @[DCache.scala:230:30]
wire _T_106 = release_state == 4'h1; // @[package.scala:16:47]
wire _inWriteback_T; // @[package.scala:16:47]
assign _inWriteback_T = _T_106; // @[package.scala:16:47]
wire _canAcceptCachedGrant_T; // @[package.scala:16:47]
assign _canAcceptCachedGrant_T = _T_106; // @[package.scala:16:47]
wire _inWriteback_T_1 = release_state == 4'h2; // @[package.scala:16:47]
wire inWriteback = _inWriteback_T | _inWriteback_T_1; // @[package.scala:16:47, :81:59]
assign metaArb_io_in_4_bits_way_en = releaseWay; // @[DCache.scala:135:28, :232:24]
assign metaArb_io_in_5_bits_way_en = releaseWay; // @[DCache.scala:135:28, :232:24]
assign metaArb_io_in_6_bits_way_en = releaseWay; // @[DCache.scala:135:28, :232:24]
assign metaArb_io_in_7_bits_way_en = releaseWay; // @[DCache.scala:135:28, :232:24]
wire _io_cpu_req_ready_T = ~(|release_state); // @[DCache.scala:228:30, :233:38]
wire _io_cpu_req_ready_T_1 = ~cached_grant_wait; // @[DCache.scala:223:34, :233:54]
wire _io_cpu_req_ready_T_2 = _io_cpu_req_ready_T & _io_cpu_req_ready_T_1; // @[DCache.scala:233:{38,51,54}]
wire _io_cpu_req_ready_T_3 = ~s1_nack; // @[DCache.scala:185:28, :187:41, :233:76]
wire _io_cpu_req_ready_T_4 = _io_cpu_req_ready_T_2 & _io_cpu_req_ready_T_3; // @[DCache.scala:233:{51,73,76}]
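  // Track the single outstanding uncached request; its latched fields are aliased as uncachedResp.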
reg uncachedInFlight_0; // @[DCache.scala:236:33]
wire _s2_valid_cached_miss_T_2 = uncachedInFlight_0; // @[DCache.scala:236:33, :425:88]
wire _s2_valid_uncached_pending_T_1 = uncachedInFlight_0; // @[DCache.scala:236:33, :430:92]
wire _io_cpu_ordered_T_6 = uncachedInFlight_0; // @[DCache.scala:236:33, :929:142]
wire _io_cpu_store_pending_T_24 = uncachedInFlight_0; // @[DCache.scala:236:33, :930:97]
wire _clock_en_reg_T_21 = uncachedInFlight_0; // @[DCache.scala:236:33, :1072:50]
reg [39:0] uncachedReqs_0_addr; // @[DCache.scala:237:25]
wire [39:0] uncachedResp_addr = uncachedReqs_0_addr; // @[DCache.scala:237:25, :238:30]
reg [7:0] uncachedReqs_0_tag; // @[DCache.scala:237:25]
wire [7:0] uncachedResp_tag = uncachedReqs_0_tag; // @[DCache.scala:237:25, :238:30]
reg [4:0] uncachedReqs_0_cmd; // @[DCache.scala:237:25]
wire [4:0] uncachedResp_cmd = uncachedReqs_0_cmd; // @[DCache.scala:237:25, :238:30]
reg [1:0] uncachedReqs_0_size; // @[DCache.scala:237:25]
wire [1:0] uncachedResp_size = uncachedReqs_0_size; // @[DCache.scala:237:25, :238:30]
reg uncachedReqs_0_signed; // @[DCache.scala:237:25]
wire uncachedResp_signed = uncachedReqs_0_signed; // @[DCache.scala:237:25, :238:30]
reg [1:0] uncachedReqs_0_dprv; // @[DCache.scala:237:25]
wire [1:0] uncachedResp_dprv = uncachedReqs_0_dprv; // @[DCache.scala:237:25, :238:30]
reg uncachedReqs_0_dv; // @[DCache.scala:237:25]
wire uncachedResp_dv = uncachedReqs_0_dv; // @[DCache.scala:237:25, :238:30]
reg uncachedReqs_0_phys; // @[DCache.scala:237:25]
wire uncachedResp_phys = uncachedReqs_0_phys; // @[DCache.scala:237:25, :238:30]
reg uncachedReqs_0_no_resp; // @[DCache.scala:237:25]
wire uncachedResp_no_resp = uncachedReqs_0_no_resp; // @[DCache.scala:237:25, :238:30]
reg uncachedReqs_0_no_alloc; // @[DCache.scala:237:25]
wire uncachedResp_no_alloc = uncachedReqs_0_no_alloc; // @[DCache.scala:237:25, :238:30]
reg uncachedReqs_0_no_xcpt; // @[DCache.scala:237:25]
wire uncachedResp_no_xcpt = uncachedReqs_0_no_xcpt; // @[DCache.scala:237:25, :238:30]
reg [63:0] uncachedReqs_0_data; // @[DCache.scala:237:25]
wire [63:0] uncachedResp_data = uncachedReqs_0_data; // @[DCache.scala:237:25, :238:30]
reg [7:0] uncachedReqs_0_mask; // @[DCache.scala:237:25]
wire [7:0] uncachedResp_mask = uncachedReqs_0_mask; // @[DCache.scala:237:25, :238:30]
wire _dataArb_io_in_3_valid_T_56 = ~_dataArb_io_in_3_valid_T_55; // @[DCache.scala:1186:11]
assign dataArb_io_in_3_valid = _dataArb_io_in_3_valid_T_58; // @[DCache.scala:152:28, :242:46]
wire [31:0] _dataArb_io_in_3_bits_addr_T = io_cpu_req_bits_addr_0[39:8]; // @[DCache.scala:101:7, :245:89]
wire [31:0] _metaArb_io_in_1_bits_addr_T = io_cpu_req_bits_addr_0[39:8]; // @[DCache.scala:101:7, :245:89, :454:58]
wire [31:0] _metaArb_io_in_2_bits_addr_T = io_cpu_req_bits_addr_0[39:8]; // @[DCache.scala:101:7, :245:89, :466:58]
wire [31:0] _metaArb_io_in_3_bits_addr_T = io_cpu_req_bits_addr_0[39:8]; // @[DCache.scala:101:7, :245:89, :745:58]
wire [31:0] _metaArb_io_in_4_bits_addr_T = io_cpu_req_bits_addr_0[39:8]; // @[DCache.scala:101:7, :245:89, :912:58]
wire [31:0] _metaArb_io_in_5_bits_addr_T = io_cpu_req_bits_addr_0[39:8]; // @[DCache.scala:101:7, :245:89, :1018:58]
wire [7:0] _dataArb_io_in_3_bits_addr_T_1 = io_cpu_req_bits_addr_0[7:0]; // @[DCache.scala:101:7, :245:120]
wire [39:0] _dataArb_io_in_3_bits_addr_T_2 = {_dataArb_io_in_3_bits_addr_T, _dataArb_io_in_3_bits_addr_T_1}; // @[DCache.scala:245:{36,89,120}]
assign dataArb_io_in_3_bits_addr = _dataArb_io_in_3_bits_addr_T_2[7:0]; // @[DCache.scala:152:28, :245:{30,36}]
wire _s1_did_read_T_54 = dataArb_io_in_3_ready & _s1_did_read_T_53; // @[DCache.scala:152:28, :259:{54,75}]
reg s1_did_read; // @[DCache.scala:259:30]
wire _s2_data_word_en_T = s1_did_read; // @[DCache.scala:259:30, :367:63]
assign _metaArb_io_in_7_bits_idx_T = _dataArb_io_in_3_bits_addr_T_2[7:6]; // @[DCache.scala:245:36, :263:58]
assign metaArb_io_in_7_bits_idx = _metaArb_io_in_7_bits_idx_T; // @[DCache.scala:135:28, :263:58]
wire _s1_cmd_uses_tlb_T = s1_readwrite | s1_flush_line; // @[DCache.scala:212:30, :214:50, :270:38]
wire _s1_cmd_uses_tlb_T_1 = s1_req_cmd == 5'h17; // @[DCache.scala:196:25, :270:69]
wire s1_cmd_uses_tlb = _s1_cmd_uses_tlb_T | _s1_cmd_uses_tlb_T_1; // @[DCache.scala:270:{38,55,69}]
wire _tlb_io_req_valid_T = ~io_cpu_s1_kill_0; // @[DCache.scala:101:7, :186:37, :273:55]
wire _tlb_io_req_valid_T_1 = s1_valid & _tlb_io_req_valid_T; // @[DCache.scala:182:25, :273:{52,55}]
wire _tlb_io_req_valid_T_2 = _tlb_io_req_valid_T_1 & s1_cmd_uses_tlb; // @[DCache.scala:270:55, :273:{52,71}]
wire _tlb_io_req_valid_T_3 = _tlb_io_req_valid_T_2; // @[DCache.scala:273:{40,71}]
wire _T_10 = ~_tlb_io_req_ready & ~io_ptw_resp_valid_0 & ~io_cpu_req_bits_phys_0; // @[DCache.scala:101:7, :119:19, :275:{9,27,30,53,56}]
wire _T_14 = s1_valid & s1_cmd_uses_tlb & _tlb_io_resp_miss; // @[DCache.scala:119:19, :182:25, :270:55, :276:{39,58}]
wire _tlb_io_sfence_valid_T = ~io_cpu_s1_kill_0; // @[DCache.scala:101:7, :186:37, :278:38]
wire _tlb_io_sfence_valid_T_1 = s1_valid & _tlb_io_sfence_valid_T; // @[DCache.scala:182:25, :278:{35,38}]
wire _tlb_io_sfence_valid_T_2 = _tlb_io_sfence_valid_T_1 & s1_sfence; // @[DCache.scala:213:71, :278:{35,54}]
wire _tlb_io_sfence_bits_rs2_T = s1_req_size[1]; // @[DCache.scala:196:25, :280:40]
wire [19:0] _s1_paddr_T = s1_req_addr[31:12]; // @[DCache.scala:196:25, :298:55]
wire [19:0] _s1_paddr_T_1 = _tlb_io_resp_paddr[31:12]; // @[DCache.scala:119:19, :298:99]
wire [19:0] _s1_paddr_T_2 = _s1_paddr_T_1; // @[DCache.scala:298:{25,99}]
wire [11:0] _s1_paddr_T_3 = s1_req_addr[11:0]; // @[DCache.scala:196:25, :298:125]
wire [31:0] s1_paddr = {_s1_paddr_T_2, _s1_paddr_T_3}; // @[DCache.scala:298:{21,25,125}]
wire [1:0] _s1_victim_way_T; // @[package.scala:163:13]
wire [1:0] s1_victim_way; // @[DCache.scala:299:27]
assign rerocc_tile_dcache_tag_array_MPORT_en = metaArb_io_out_valid & metaArb_io_out_bits_write; // @[DCache.scala:135:28, :310:27]
assign wmask_0 = metaArb_io_out_bits_way_en[0]; // @[DCache.scala:135:28, :311:74]
assign wmask_1 = metaArb_io_out_bits_way_en[1]; // @[DCache.scala:135:28, :311:74]
assign wmask_2 = metaArb_io_out_bits_way_en[2]; // @[DCache.scala:135:28, :311:74]
assign wmask_3 = metaArb_io_out_bits_way_en[3]; // @[DCache.scala:135:28, :311:74]
wire _s1_meta_T = ~metaArb_io_out_bits_write; // @[DCache.scala:135:28, :190:43, :314:62]
assign _s1_meta_T_1 = metaArb_io_out_valid & _s1_meta_T; // @[DCache.scala:135:28, :314:{59,62}]
wire [1:0] _s1_meta_uncorrected_T_1; // @[DCache.scala:315:80]
wire [23:0] _s1_meta_uncorrected_T; // @[DCache.scala:315:80]
wire [1:0] s1_meta_uncorrected_0_coh_state; // @[DCache.scala:315:80]
wire [23:0] s1_meta_uncorrected_0_tag; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T = _s1_meta_uncorrected_WIRE[23:0]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_0_tag = _s1_meta_uncorrected_T; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_1 = _s1_meta_uncorrected_WIRE[25:24]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_0_coh_state = _s1_meta_uncorrected_T_1; // @[DCache.scala:315:80]
wire [1:0] _s1_meta_uncorrected_T_3; // @[DCache.scala:315:80]
wire [23:0] _s1_meta_uncorrected_T_2; // @[DCache.scala:315:80]
wire [1:0] s1_meta_uncorrected_1_coh_state; // @[DCache.scala:315:80]
wire [23:0] s1_meta_uncorrected_1_tag; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_2 = _s1_meta_uncorrected_WIRE_1[23:0]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_1_tag = _s1_meta_uncorrected_T_2; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_3 = _s1_meta_uncorrected_WIRE_1[25:24]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_1_coh_state = _s1_meta_uncorrected_T_3; // @[DCache.scala:315:80]
wire [1:0] _s1_meta_uncorrected_T_5; // @[DCache.scala:315:80]
wire [23:0] _s1_meta_uncorrected_T_4; // @[DCache.scala:315:80]
wire [1:0] s1_meta_uncorrected_2_coh_state; // @[DCache.scala:315:80]
wire [23:0] s1_meta_uncorrected_2_tag; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_4 = _s1_meta_uncorrected_WIRE_2[23:0]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_2_tag = _s1_meta_uncorrected_T_4; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_5 = _s1_meta_uncorrected_WIRE_2[25:24]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_2_coh_state = _s1_meta_uncorrected_T_5; // @[DCache.scala:315:80]
wire [1:0] _s1_meta_uncorrected_T_7; // @[DCache.scala:315:80]
wire [23:0] _s1_meta_uncorrected_T_6; // @[DCache.scala:315:80]
wire [1:0] s1_meta_uncorrected_3_coh_state; // @[DCache.scala:315:80]
wire [23:0] s1_meta_uncorrected_3_tag; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_6 = _s1_meta_uncorrected_WIRE_3[23:0]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_3_tag = _s1_meta_uncorrected_T_6; // @[DCache.scala:315:80]
assign _s1_meta_uncorrected_T_7 = _s1_meta_uncorrected_WIRE_3[25:24]; // @[DCache.scala:315:80]
assign s1_meta_uncorrected_3_coh_state = _s1_meta_uncorrected_T_7; // @[DCache.scala:315:80]
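  // Compare the four decoded tag entries against s1_tag to form the one-hot s1_hit_way
  // and the merged s1_hit_state (zeroed while s1_flush_valid is set).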
wire [23:0] s1_tag = s1_paddr[31:8]; // @[DCache.scala:298:21, :316:29]
wire _s1_meta_hit_way_T = |s1_meta_uncorrected_0_coh_state; // @[Metadata.scala:50:45]
wire _GEN_57 = s1_meta_uncorrected_0_tag == s1_tag; // @[DCache.scala:315:80, :316:29, :317:83]
wire _s1_meta_hit_way_T_1; // @[DCache.scala:317:83]
assign _s1_meta_hit_way_T_1 = _GEN_57; // @[DCache.scala:317:83]
wire _s1_meta_hit_state_T; // @[DCache.scala:319:48]
assign _s1_meta_hit_state_T = _GEN_57; // @[DCache.scala:317:83, :319:48]
wire _s1_meta_hit_way_T_2 = _s1_meta_hit_way_T & _s1_meta_hit_way_T_1; // @[Metadata.scala:50:45]
wire _s1_meta_hit_way_T_3 = |s1_meta_uncorrected_1_coh_state; // @[Metadata.scala:50:45]
wire _GEN_58 = s1_meta_uncorrected_1_tag == s1_tag; // @[DCache.scala:315:80, :316:29, :317:83]
wire _s1_meta_hit_way_T_4; // @[DCache.scala:317:83]
assign _s1_meta_hit_way_T_4 = _GEN_58; // @[DCache.scala:317:83]
wire _s1_meta_hit_state_T_4; // @[DCache.scala:319:48]
assign _s1_meta_hit_state_T_4 = _GEN_58; // @[DCache.scala:317:83, :319:48]
wire _s1_meta_hit_way_T_5 = _s1_meta_hit_way_T_3 & _s1_meta_hit_way_T_4; // @[Metadata.scala:50:45]
wire _s1_meta_hit_way_T_6 = |s1_meta_uncorrected_2_coh_state; // @[Metadata.scala:50:45]
wire _GEN_59 = s1_meta_uncorrected_2_tag == s1_tag; // @[DCache.scala:315:80, :316:29, :317:83]
wire _s1_meta_hit_way_T_7; // @[DCache.scala:317:83]
assign _s1_meta_hit_way_T_7 = _GEN_59; // @[DCache.scala:317:83]
wire _s1_meta_hit_state_T_8; // @[DCache.scala:319:48]
assign _s1_meta_hit_state_T_8 = _GEN_59; // @[DCache.scala:317:83, :319:48]
wire _s1_meta_hit_way_T_8 = _s1_meta_hit_way_T_6 & _s1_meta_hit_way_T_7; // @[Metadata.scala:50:45]
wire _s1_meta_hit_way_T_9 = |s1_meta_uncorrected_3_coh_state; // @[Metadata.scala:50:45]
wire _GEN_60 = s1_meta_uncorrected_3_tag == s1_tag; // @[DCache.scala:315:80, :316:29, :317:83]
wire _s1_meta_hit_way_T_10; // @[DCache.scala:317:83]
assign _s1_meta_hit_way_T_10 = _GEN_60; // @[DCache.scala:317:83]
wire _s1_meta_hit_state_T_12; // @[DCache.scala:319:48]
assign _s1_meta_hit_state_T_12 = _GEN_60; // @[DCache.scala:317:83, :319:48]
wire _s1_meta_hit_way_T_11 = _s1_meta_hit_way_T_9 & _s1_meta_hit_way_T_10; // @[Metadata.scala:50:45]
wire [1:0] s1_meta_hit_way_lo = {_s1_meta_hit_way_T_5, _s1_meta_hit_way_T_2}; // @[package.scala:45:27]
wire [1:0] s1_meta_hit_way_hi = {_s1_meta_hit_way_T_11, _s1_meta_hit_way_T_8}; // @[package.scala:45:27]
wire [3:0] s1_hit_way = {s1_meta_hit_way_hi, s1_meta_hit_way_lo}; // @[package.scala:45:27]
wire _s1_meta_hit_state_T_1 = ~s1_flush_valid; // @[DCache.scala:215:27, :319:62]
wire _s1_meta_hit_state_T_2 = _s1_meta_hit_state_T & _s1_meta_hit_state_T_1; // @[DCache.scala:319:{48,59,62}]
wire [1:0] _s1_meta_hit_state_T_3 = _s1_meta_hit_state_T_2 ? s1_meta_uncorrected_0_coh_state : 2'h0; // @[DCache.scala:315:80, :319:{41,59}]
wire _s1_meta_hit_state_T_5 = ~s1_flush_valid; // @[DCache.scala:215:27, :319:62]
wire _s1_meta_hit_state_T_6 = _s1_meta_hit_state_T_4 & _s1_meta_hit_state_T_5; // @[DCache.scala:319:{48,59,62}]
wire [1:0] _s1_meta_hit_state_T_7 = _s1_meta_hit_state_T_6 ? s1_meta_uncorrected_1_coh_state : 2'h0; // @[DCache.scala:315:80, :319:{41,59}]
wire _s1_meta_hit_state_T_9 = ~s1_flush_valid; // @[DCache.scala:215:27, :319:62]
wire _s1_meta_hit_state_T_10 = _s1_meta_hit_state_T_8 & _s1_meta_hit_state_T_9; // @[DCache.scala:319:{48,59,62}]
wire [1:0] _s1_meta_hit_state_T_11 = _s1_meta_hit_state_T_10 ? s1_meta_uncorrected_2_coh_state : 2'h0; // @[DCache.scala:315:80, :319:{41,59}]
wire _s1_meta_hit_state_T_13 = ~s1_flush_valid; // @[DCache.scala:215:27, :319:62]
wire _s1_meta_hit_state_T_14 = _s1_meta_hit_state_T_12 & _s1_meta_hit_state_T_13; // @[DCache.scala:319:{48,59,62}]
wire [1:0] _s1_meta_hit_state_T_15 = _s1_meta_hit_state_T_14 ? s1_meta_uncorrected_3_coh_state : 2'h0; // @[DCache.scala:315:80, :319:{41,59}]
wire [1:0] _s1_meta_hit_state_T_16 = _s1_meta_hit_state_T_3 | _s1_meta_hit_state_T_7; // @[DCache.scala:319:41, :320:19]
wire [1:0] _s1_meta_hit_state_T_17 = _s1_meta_hit_state_T_16 | _s1_meta_hit_state_T_11; // @[DCache.scala:319:41, :320:19]
wire [1:0] _s1_meta_hit_state_T_18 = _s1_meta_hit_state_T_17 | _s1_meta_hit_state_T_15; // @[DCache.scala:319:41, :320:19]
wire [1:0] _s1_meta_hit_state_WIRE = _s1_meta_hit_state_T_18; // @[DCache.scala:320:{19,32}]
wire [1:0] _s1_meta_hit_state_T_19; // @[DCache.scala:320:32]
wire [1:0] s1_hit_state_state; // @[DCache.scala:320:32]
assign _s1_meta_hit_state_T_19 = _s1_meta_hit_state_WIRE; // @[DCache.scala:320:32]
assign s1_hit_state_state = _s1_meta_hit_state_T_19; // @[DCache.scala:320:32]
wire [3:0] _s1_data_way_T = inWriteback ? releaseWay : s1_hit_way; // @[package.scala:45:27, :81:59]
wire [4:0] s1_data_way; // @[DCache.scala:323:32]
wire [7:0] _tl_d_data_encoded_T = nodeOut_d_bits_data[7:0]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_13 = nodeOut_d_bits_data[7:0]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_1 = nodeOut_d_bits_data[15:8]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_14 = nodeOut_d_bits_data[15:8]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_2 = nodeOut_d_bits_data[23:16]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_15 = nodeOut_d_bits_data[23:16]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_3 = nodeOut_d_bits_data[31:24]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_16 = nodeOut_d_bits_data[31:24]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_4 = nodeOut_d_bits_data[39:32]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_17 = nodeOut_d_bits_data[39:32]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_5 = nodeOut_d_bits_data[47:40]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_18 = nodeOut_d_bits_data[47:40]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_6 = nodeOut_d_bits_data[55:48]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_19 = nodeOut_d_bits_data[55:48]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_7 = nodeOut_d_bits_data[63:56]; // @[package.scala:211:50]
wire [7:0] _tl_d_data_encoded_T_20 = nodeOut_d_bits_data[63:56]; // @[package.scala:211:50]
wire [15:0] tl_d_data_encoded_lo_lo = {_tl_d_data_encoded_T_1, _tl_d_data_encoded_T}; // @[package.scala:45:27, :211:50]
wire [15:0] tl_d_data_encoded_lo_hi = {_tl_d_data_encoded_T_3, _tl_d_data_encoded_T_2}; // @[package.scala:45:27, :211:50]
wire [31:0] tl_d_data_encoded_lo = {tl_d_data_encoded_lo_hi, tl_d_data_encoded_lo_lo}; // @[package.scala:45:27]
wire [15:0] tl_d_data_encoded_hi_lo = {_tl_d_data_encoded_T_5, _tl_d_data_encoded_T_4}; // @[package.scala:45:27, :211:50]
wire [15:0] tl_d_data_encoded_hi_hi = {_tl_d_data_encoded_T_7, _tl_d_data_encoded_T_6}; // @[package.scala:45:27, :211:50]
wire [31:0] tl_d_data_encoded_hi = {tl_d_data_encoded_hi_hi, tl_d_data_encoded_hi_lo}; // @[package.scala:45:27]
wire [63:0] _tl_d_data_encoded_T_8 = {tl_d_data_encoded_hi, tl_d_data_encoded_lo}; // @[package.scala:45:27]
wire [63:0] _tl_d_data_encoded_T_21; // @[package.scala:45:27]
assign dataArb_io_in_1_bits_wdata = tl_d_data_encoded; // @[DCache.scala:152:28, :324:31]
assign dataArb_io_in_2_bits_wdata = tl_d_data_encoded; // @[DCache.scala:152:28, :324:31]
assign dataArb_io_in_3_bits_wdata = tl_d_data_encoded; // @[DCache.scala:152:28, :324:31]
wire [63:0] s1_all_data_ways_4 = tl_d_data_encoded; // @[DCache.scala:324:31, :325:33]
wire [63:0] s2_data_s1_way_words_0_0 = s1_all_data_ways_0; // @[package.scala:211:50]
wire [63:0] s2_data_s1_way_words_1_0 = s1_all_data_ways_1; // @[package.scala:211:50]
wire [63:0] s2_data_s1_way_words_2_0 = s1_all_data_ways_2; // @[package.scala:211:50]
wire [63:0] s2_data_s1_way_words_3_0 = s1_all_data_ways_3; // @[package.scala:211:50]
wire [63:0] s2_data_s1_way_words_4_0 = s1_all_data_ways_4; // @[package.scala:211:50]
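  // Build the per-byte store mask (s1_mask_xwr) from the request size and low address bits,
  // AMOALU-style; the CPU-supplied s1_data_mask overrides it for partial-write commands (cmd 5'h11).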
wire _s1_mask_xwr_upper_T = s1_req_addr[0]; // @[DCache.scala:196:25]
wire _s1_mask_xwr_lower_T = s1_req_addr[0]; // @[DCache.scala:196:25]
wire _s1_mask_xwr_upper_T_1 = _s1_mask_xwr_upper_T; // @[AMOALU.scala:20:{22,27}]
wire _s1_mask_xwr_upper_T_2 = |s1_mask_xwr_size; // @[AMOALU.scala:11:18, :20:53]
wire _s1_mask_xwr_upper_T_3 = _s1_mask_xwr_upper_T_2; // @[AMOALU.scala:20:{47,53}]
wire s1_mask_xwr_upper = _s1_mask_xwr_upper_T_1 | _s1_mask_xwr_upper_T_3; // @[AMOALU.scala:20:{22,42,47}]
wire s1_mask_xwr_lower = ~_s1_mask_xwr_lower_T; // @[AMOALU.scala:21:{22,27}]
wire [1:0] _s1_mask_xwr_T = {s1_mask_xwr_upper, s1_mask_xwr_lower}; // @[AMOALU.scala:20:42, :21:22, :22:16]
wire _s1_mask_xwr_upper_T_4 = s1_req_addr[1]; // @[DCache.scala:196:25]
wire _s1_mask_xwr_lower_T_1 = s1_req_addr[1]; // @[DCache.scala:196:25]
wire [1:0] _s1_mask_xwr_upper_T_5 = _s1_mask_xwr_upper_T_4 ? _s1_mask_xwr_T : 2'h0; // @[AMOALU.scala:20:{22,27}, :22:16]
wire _s1_mask_xwr_upper_T_6 = s1_mask_xwr_size[1]; // @[AMOALU.scala:11:18, :20:53]
wire [1:0] _s1_mask_xwr_upper_T_7 = {2{_s1_mask_xwr_upper_T_6}}; // @[AMOALU.scala:20:{47,53}]
wire [1:0] s1_mask_xwr_upper_1 = _s1_mask_xwr_upper_T_5 | _s1_mask_xwr_upper_T_7; // @[AMOALU.scala:20:{22,42,47}]
wire [1:0] s1_mask_xwr_lower_1 = _s1_mask_xwr_lower_T_1 ? 2'h0 : _s1_mask_xwr_T; // @[AMOALU.scala:21:{22,27}, :22:16]
wire [3:0] _s1_mask_xwr_T_1 = {s1_mask_xwr_upper_1, s1_mask_xwr_lower_1}; // @[AMOALU.scala:20:42, :21:22, :22:16]
wire _s1_mask_xwr_upper_T_8 = s1_req_addr[2]; // @[DCache.scala:196:25]
wire _s1_mask_xwr_lower_T_2 = s1_req_addr[2]; // @[DCache.scala:196:25]
wire [3:0] _s1_mask_xwr_upper_T_9 = _s1_mask_xwr_upper_T_8 ? _s1_mask_xwr_T_1 : 4'h0; // @[AMOALU.scala:20:{22,27}, :22:16]
wire _s1_mask_xwr_upper_T_10 = &s1_mask_xwr_size; // @[AMOALU.scala:11:18, :20:53]
wire [3:0] _s1_mask_xwr_upper_T_11 = {4{_s1_mask_xwr_upper_T_10}}; // @[AMOALU.scala:20:{47,53}]
wire [3:0] s1_mask_xwr_upper_2 = _s1_mask_xwr_upper_T_9 | _s1_mask_xwr_upper_T_11; // @[AMOALU.scala:20:{22,42,47}]
wire [3:0] s1_mask_xwr_lower_2 = _s1_mask_xwr_lower_T_2 ? 4'h0 : _s1_mask_xwr_T_1; // @[AMOALU.scala:21:{22,27}, :22:16]
wire [7:0] s1_mask_xwr = {s1_mask_xwr_upper_2, s1_mask_xwr_lower_2}; // @[AMOALU.scala:20:42, :21:22, :22:16]
wire [7:0] s1_mask = _s1_mask_T ? io_cpu_s1_data_mask_0 : s1_mask_xwr; // @[DCache.scala:101:7, :327:{20,32}]
wire _s2_valid_T = ~s1_sfence; // @[DCache.scala:213:71, :331:45]
wire _s2_valid_T_1 = s1_valid_masked & _s2_valid_T; // @[DCache.scala:186:34, :331:{42,45}]
reg s2_valid; // @[DCache.scala:331:25]
wire [1:0] _s2_valid_no_xcpt_T = {io_cpu_s2_xcpt_ae_ld_0, io_cpu_s2_xcpt_ae_st_0}; // @[DCache.scala:101:7, :332:54]
wire [1:0] _s2_valid_no_xcpt_T_2 = {io_cpu_s2_xcpt_pf_ld_0, io_cpu_s2_xcpt_pf_st_0}; // @[DCache.scala:101:7, :332:54]
wire [1:0] _s2_valid_no_xcpt_T_3 = {io_cpu_s2_xcpt_ma_ld_0, io_cpu_s2_xcpt_ma_st_0}; // @[DCache.scala:101:7, :332:54]
wire [3:0] s2_valid_no_xcpt_lo = {2'h0, _s2_valid_no_xcpt_T}; // @[DCache.scala:332:54]
wire [3:0] s2_valid_no_xcpt_hi = {_s2_valid_no_xcpt_T_3, _s2_valid_no_xcpt_T_2}; // @[DCache.scala:332:54]
wire [7:0] _s2_valid_no_xcpt_T_4 = {s2_valid_no_xcpt_hi, s2_valid_no_xcpt_lo}; // @[DCache.scala:332:54]
wire _s2_valid_no_xcpt_T_5 = |_s2_valid_no_xcpt_T_4; // @[DCache.scala:332:{54,61}]
wire _s2_valid_no_xcpt_T_6 = ~_s2_valid_no_xcpt_T_5; // @[DCache.scala:332:{38,61}]
wire s2_valid_no_xcpt = s2_valid & _s2_valid_no_xcpt_T_6; // @[DCache.scala:331:25, :332:{35,38}]
reg s2_probe; // @[DCache.scala:333:25]
wire _releaseInFlight_T = s1_probe | s2_probe; // @[DCache.scala:183:25, :333:25, :334:34]
wire _releaseInFlight_T_1 = |release_state; // @[DCache.scala:228:30, :233:38, :334:63]
wire releaseInFlight = _releaseInFlight_T | _releaseInFlight_T_1; // @[DCache.scala:334:{34,46,63}]
wire _s2_not_nacked_in_s1_T = ~s1_nack; // @[DCache.scala:185:28, :187:41, :335:37]
reg s2_not_nacked_in_s1; // @[DCache.scala:335:36]
wire s2_valid_not_nacked_in_s1 = s2_valid & s2_not_nacked_in_s1; // @[DCache.scala:331:25, :335:36, :336:44]
wire s2_valid_masked = s2_valid_no_xcpt & s2_not_nacked_in_s1; // @[DCache.scala:332:35, :335:36, :337:42]
wire s2_valid_not_killed = s2_valid_masked; // @[DCache.scala:337:42, :338:45]
wire _s2_valid_hit_maybe_flush_pre_data_ecc_and_waw_T_1 = s2_valid_masked; // @[DCache.scala:337:42, :397:71]
wire _s2_dont_nack_misc_T_1 = s2_valid_masked; // @[DCache.scala:337:42, :441:43]
reg [39:0] s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_14 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _put_legal_T_14 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _putpartial_legal_T_14 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_4 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_64 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_124 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_184 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_244 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_304 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_364 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_424 = s2_req_addr; // @[DCache.scala:339:19]
wire [39:0] _atomics_legal_T_484 = s2_req_addr; // @[DCache.scala:339:19]
reg [7:0] s2_req_tag; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_tag_0 = s2_req_tag; // @[DCache.scala:101:7, :339:19]
reg [4:0] s2_req_cmd; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_cmd_0 = s2_req_cmd; // @[DCache.scala:101:7, :339:19]
reg [1:0] s2_req_size; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_size_0 = s2_req_size; // @[DCache.scala:101:7, :339:19]
wire [1:0] size = s2_req_size; // @[DCache.scala:339:19]
reg s2_req_signed; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_signed_0 = s2_req_signed; // @[DCache.scala:101:7, :339:19]
reg [1:0] s2_req_dprv; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_dprv_0 = s2_req_dprv; // @[DCache.scala:101:7, :339:19]
reg s2_req_dv; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_dv_0 = s2_req_dv; // @[DCache.scala:101:7, :339:19]
reg s2_req_phys; // @[DCache.scala:339:19]
reg s2_req_no_resp; // @[DCache.scala:339:19]
reg s2_req_no_alloc; // @[DCache.scala:339:19]
reg s2_req_no_xcpt; // @[DCache.scala:339:19]
reg [63:0] s2_req_data; // @[DCache.scala:339:19]
reg [7:0] s2_req_mask; // @[DCache.scala:339:19]
assign io_cpu_resp_bits_mask_0 = s2_req_mask; // @[DCache.scala:101:7, :339:19]
wire _GEN_61 = s2_req_cmd == 5'h5; // @[DCache.scala:339:19, :340:37]
wire _s2_cmd_flush_all_T; // @[DCache.scala:340:37]
assign _s2_cmd_flush_all_T = _GEN_61; // @[DCache.scala:340:37]
wire _s2_cmd_flush_line_T; // @[DCache.scala:341:38]
assign _s2_cmd_flush_line_T = _GEN_61; // @[DCache.scala:340:37, :341:38]
wire _s2_cmd_flush_all_T_1 = s2_req_size[0]; // @[DCache.scala:339:19, :340:68]
wire _s2_cmd_flush_line_T_1 = s2_req_size[0]; // @[DCache.scala:339:19, :340:68, :341:68]
wire _s2_cmd_flush_all_T_2 = ~_s2_cmd_flush_all_T_1; // @[DCache.scala:340:{56,68}]
wire s2_cmd_flush_all = _s2_cmd_flush_all_T & _s2_cmd_flush_all_T_2; // @[DCache.scala:340:{37,53,56}]
wire s2_cmd_flush_line = _s2_cmd_flush_line_T & _s2_cmd_flush_line_T_1; // @[DCache.scala:341:{38,54,68}]
reg s2_tlb_xcpt_miss; // @[DCache.scala:342:24]
reg [31:0] s2_tlb_xcpt_paddr; // @[DCache.scala:342:24]
reg [39:0] s2_tlb_xcpt_gpa; // @[DCache.scala:342:24]
assign io_cpu_s2_gpa_0 = s2_tlb_xcpt_gpa; // @[DCache.scala:101:7, :342:24]
reg s2_tlb_xcpt_pf_ld; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_pf_st; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_pf_inst; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ae_ld; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ae_st; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ae_inst; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ma_ld; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ma_st; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_cacheable; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_must_alloc; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_prefetchable; // @[DCache.scala:342:24]
reg [1:0] s2_tlb_xcpt_size; // @[DCache.scala:342:24]
reg [4:0] s2_tlb_xcpt_cmd; // @[DCache.scala:342:24]
reg s2_pma_miss; // @[DCache.scala:343:19]
reg [31:0] s2_pma_paddr; // @[DCache.scala:343:19]
reg [39:0] s2_pma_gpa; // @[DCache.scala:343:19]
reg s2_pma_pf_ld; // @[DCache.scala:343:19]
reg s2_pma_pf_st; // @[DCache.scala:343:19]
reg s2_pma_pf_inst; // @[DCache.scala:343:19]
reg s2_pma_ae_ld; // @[DCache.scala:343:19]
reg s2_pma_ae_st; // @[DCache.scala:343:19]
reg s2_pma_ae_inst; // @[DCache.scala:343:19]
reg s2_pma_ma_ld; // @[DCache.scala:343:19]
reg s2_pma_ma_st; // @[DCache.scala:343:19]
reg s2_pma_cacheable; // @[DCache.scala:343:19]
reg s2_pma_must_alloc; // @[DCache.scala:343:19]
reg s2_pma_prefetchable; // @[DCache.scala:343:19]
reg [1:0] s2_pma_size; // @[DCache.scala:343:19]
reg [4:0] s2_pma_cmd; // @[DCache.scala:343:19]
reg [39:0] s2_uncached_resp_addr; // @[DCache.scala:344:34]
wire _T_30 = s1_valid_not_nacked | s1_flush_valid; // @[DCache.scala:187:38, :215:27, :345:29]
wire _s2_vaddr_T; // @[DCache.scala:351:62]
assign _s2_vaddr_T = _T_30; // @[DCache.scala:345:29, :351:62]
wire _s1_meta_clk_en_T; // @[DCache.scala:357:44]
assign _s1_meta_clk_en_T = _T_30; // @[DCache.scala:345:29, :357:44]
wire _s2_hit_state_T; // @[DCache.scala:386:66]
assign _s2_hit_state_T = _T_30; // @[DCache.scala:345:29, :386:66]
wire _s2_victim_way_T; // @[DCache.scala:431:77]
assign _s2_victim_way_T = _T_30; // @[DCache.scala:345:29, :431:77]
reg [39:0] s2_vaddr_r; // @[DCache.scala:351:31]
wire [31:0] _s2_vaddr_T_1 = s2_vaddr_r[39:8]; // @[DCache.scala:351:{31,81}]
wire [7:0] _s2_vaddr_T_2 = s2_req_addr[7:0]; // @[DCache.scala:339:19, :351:103]
wire [39:0] s2_vaddr = {_s2_vaddr_T_1, _s2_vaddr_T_2}; // @[DCache.scala:351:{21,81,103}]
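  // Decode s2_req_cmd; these shared comparators also drive the TileLink atomic opcode selection
  // and the coherence-state update written back through metaArb input 3.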
wire _s2_read_T = s2_req_cmd == 5'h0; // @[package.scala:16:47]
wire _s2_read_T_1 = s2_req_cmd == 5'h10; // @[package.scala:16:47]
wire _GEN_62 = s2_req_cmd == 5'h6; // @[package.scala:16:47]
wire _s2_read_T_2; // @[package.scala:16:47]
assign _s2_read_T_2 = _GEN_62; // @[package.scala:16:47]
wire _r_c_cat_T_48; // @[Consts.scala:91:71]
assign _r_c_cat_T_48 = _GEN_62; // @[package.scala:16:47]
wire _s2_lr_T; // @[DCache.scala:470:70]
assign _s2_lr_T = _GEN_62; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_48; // @[Consts.scala:91:71]
assign _metaArb_io_in_3_bits_data_c_cat_T_48 = _GEN_62; // @[package.scala:16:47]
wire _GEN_63 = s2_req_cmd == 5'h7; // @[package.scala:16:47]
wire _s2_read_T_3; // @[package.scala:16:47]
assign _s2_read_T_3 = _GEN_63; // @[package.scala:16:47]
wire _s2_write_T_3; // @[Consts.scala:90:66]
assign _s2_write_T_3 = _GEN_63; // @[package.scala:16:47]
wire _r_c_cat_T_3; // @[Consts.scala:90:66]
assign _r_c_cat_T_3 = _GEN_63; // @[package.scala:16:47]
wire _r_c_cat_T_26; // @[Consts.scala:90:66]
assign _r_c_cat_T_26 = _GEN_63; // @[package.scala:16:47]
wire _s2_sc_T; // @[DCache.scala:471:70]
assign _s2_sc_T = _GEN_63; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_3; // @[Consts.scala:90:66]
assign _metaArb_io_in_3_bits_data_c_cat_T_3 = _GEN_63; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_26; // @[Consts.scala:90:66]
assign _metaArb_io_in_3_bits_data_c_cat_T_26 = _GEN_63; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_3; // @[Consts.scala:90:66]
assign _io_cpu_store_pending_T_3 = _GEN_63; // @[package.scala:16:47]
wire _s2_read_T_4 = _s2_read_T | _s2_read_T_1; // @[package.scala:16:47, :81:59]
wire _s2_read_T_5 = _s2_read_T_4 | _s2_read_T_2; // @[package.scala:16:47, :81:59]
wire _s2_read_T_6 = _s2_read_T_5 | _s2_read_T_3; // @[package.scala:16:47, :81:59]
wire _GEN_64 = s2_req_cmd == 5'h4; // @[package.scala:16:47]
wire _s2_read_T_7; // @[package.scala:16:47]
assign _s2_read_T_7 = _GEN_64; // @[package.scala:16:47]
wire _s2_write_T_5; // @[package.scala:16:47]
assign _s2_write_T_5 = _GEN_64; // @[package.scala:16:47]
wire _r_c_cat_T_5; // @[package.scala:16:47]
assign _r_c_cat_T_5 = _GEN_64; // @[package.scala:16:47]
wire _r_c_cat_T_28; // @[package.scala:16:47]
assign _r_c_cat_T_28 = _GEN_64; // @[package.scala:16:47]
wire _atomics_T; // @[DCache.scala:587:81]
assign _atomics_T = _GEN_64; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_5; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_5 = _GEN_64; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_28; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_28 = _GEN_64; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_5; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_5 = _GEN_64; // @[package.scala:16:47]
wire _GEN_65 = s2_req_cmd == 5'h9; // @[package.scala:16:47]
wire _s2_read_T_8; // @[package.scala:16:47]
assign _s2_read_T_8 = _GEN_65; // @[package.scala:16:47]
wire _s2_write_T_6; // @[package.scala:16:47]
assign _s2_write_T_6 = _GEN_65; // @[package.scala:16:47]
wire _r_c_cat_T_6; // @[package.scala:16:47]
assign _r_c_cat_T_6 = _GEN_65; // @[package.scala:16:47]
wire _r_c_cat_T_29; // @[package.scala:16:47]
assign _r_c_cat_T_29 = _GEN_65; // @[package.scala:16:47]
wire _atomics_T_2; // @[DCache.scala:587:81]
assign _atomics_T_2 = _GEN_65; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_6; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_6 = _GEN_65; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_29; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_29 = _GEN_65; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_6; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_6 = _GEN_65; // @[package.scala:16:47]
wire _GEN_66 = s2_req_cmd == 5'hA; // @[package.scala:16:47]
wire _s2_read_T_9; // @[package.scala:16:47]
assign _s2_read_T_9 = _GEN_66; // @[package.scala:16:47]
wire _s2_write_T_7; // @[package.scala:16:47]
assign _s2_write_T_7 = _GEN_66; // @[package.scala:16:47]
wire _r_c_cat_T_7; // @[package.scala:16:47]
assign _r_c_cat_T_7 = _GEN_66; // @[package.scala:16:47]
wire _r_c_cat_T_30; // @[package.scala:16:47]
assign _r_c_cat_T_30 = _GEN_66; // @[package.scala:16:47]
wire _atomics_T_4; // @[DCache.scala:587:81]
assign _atomics_T_4 = _GEN_66; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_7; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_7 = _GEN_66; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_30; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_30 = _GEN_66; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_7; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_7 = _GEN_66; // @[package.scala:16:47]
wire _GEN_67 = s2_req_cmd == 5'hB; // @[package.scala:16:47]
wire _s2_read_T_10; // @[package.scala:16:47]
assign _s2_read_T_10 = _GEN_67; // @[package.scala:16:47]
wire _s2_write_T_8; // @[package.scala:16:47]
assign _s2_write_T_8 = _GEN_67; // @[package.scala:16:47]
wire _r_c_cat_T_8; // @[package.scala:16:47]
assign _r_c_cat_T_8 = _GEN_67; // @[package.scala:16:47]
wire _r_c_cat_T_31; // @[package.scala:16:47]
assign _r_c_cat_T_31 = _GEN_67; // @[package.scala:16:47]
wire _atomics_T_6; // @[DCache.scala:587:81]
assign _atomics_T_6 = _GEN_67; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_8; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_8 = _GEN_67; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_31; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_31 = _GEN_67; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_8; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_8 = _GEN_67; // @[package.scala:16:47]
wire _s2_read_T_11 = _s2_read_T_7 | _s2_read_T_8; // @[package.scala:16:47, :81:59]
wire _s2_read_T_12 = _s2_read_T_11 | _s2_read_T_9; // @[package.scala:16:47, :81:59]
wire _s2_read_T_13 = _s2_read_T_12 | _s2_read_T_10; // @[package.scala:16:47, :81:59]
wire _GEN_68 = s2_req_cmd == 5'h8; // @[package.scala:16:47]
wire _s2_read_T_14; // @[package.scala:16:47]
assign _s2_read_T_14 = _GEN_68; // @[package.scala:16:47]
wire _s2_write_T_12; // @[package.scala:16:47]
assign _s2_write_T_12 = _GEN_68; // @[package.scala:16:47]
wire _r_c_cat_T_12; // @[package.scala:16:47]
assign _r_c_cat_T_12 = _GEN_68; // @[package.scala:16:47]
wire _r_c_cat_T_35; // @[package.scala:16:47]
assign _r_c_cat_T_35 = _GEN_68; // @[package.scala:16:47]
wire _atomics_T_8; // @[DCache.scala:587:81]
assign _atomics_T_8 = _GEN_68; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_12; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_12 = _GEN_68; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_35; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_35 = _GEN_68; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_12; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_12 = _GEN_68; // @[package.scala:16:47]
wire _GEN_69 = s2_req_cmd == 5'hC; // @[package.scala:16:47]
wire _s2_read_T_15; // @[package.scala:16:47]
assign _s2_read_T_15 = _GEN_69; // @[package.scala:16:47]
wire _s2_write_T_13; // @[package.scala:16:47]
assign _s2_write_T_13 = _GEN_69; // @[package.scala:16:47]
wire _r_c_cat_T_13; // @[package.scala:16:47]
assign _r_c_cat_T_13 = _GEN_69; // @[package.scala:16:47]
wire _r_c_cat_T_36; // @[package.scala:16:47]
assign _r_c_cat_T_36 = _GEN_69; // @[package.scala:16:47]
wire _atomics_T_10; // @[DCache.scala:587:81]
assign _atomics_T_10 = _GEN_69; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_13; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_13 = _GEN_69; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_36; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_36 = _GEN_69; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_13; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_13 = _GEN_69; // @[package.scala:16:47]
wire _GEN_70 = s2_req_cmd == 5'hD; // @[package.scala:16:47]
wire _s2_read_T_16; // @[package.scala:16:47]
assign _s2_read_T_16 = _GEN_70; // @[package.scala:16:47]
wire _s2_write_T_14; // @[package.scala:16:47]
assign _s2_write_T_14 = _GEN_70; // @[package.scala:16:47]
wire _r_c_cat_T_14; // @[package.scala:16:47]
assign _r_c_cat_T_14 = _GEN_70; // @[package.scala:16:47]
wire _r_c_cat_T_37; // @[package.scala:16:47]
assign _r_c_cat_T_37 = _GEN_70; // @[package.scala:16:47]
wire _atomics_T_12; // @[DCache.scala:587:81]
assign _atomics_T_12 = _GEN_70; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_14; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_14 = _GEN_70; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_37; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_37 = _GEN_70; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_14; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_14 = _GEN_70; // @[package.scala:16:47]
wire _GEN_71 = s2_req_cmd == 5'hE; // @[package.scala:16:47]
wire _s2_read_T_17; // @[package.scala:16:47]
assign _s2_read_T_17 = _GEN_71; // @[package.scala:16:47]
wire _s2_write_T_15; // @[package.scala:16:47]
assign _s2_write_T_15 = _GEN_71; // @[package.scala:16:47]
wire _r_c_cat_T_15; // @[package.scala:16:47]
assign _r_c_cat_T_15 = _GEN_71; // @[package.scala:16:47]
wire _r_c_cat_T_38; // @[package.scala:16:47]
assign _r_c_cat_T_38 = _GEN_71; // @[package.scala:16:47]
wire _atomics_T_14; // @[DCache.scala:587:81]
assign _atomics_T_14 = _GEN_71; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_15; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_15 = _GEN_71; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_38; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_38 = _GEN_71; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_15; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_15 = _GEN_71; // @[package.scala:16:47]
wire _GEN_72 = s2_req_cmd == 5'hF; // @[package.scala:16:47]
wire _s2_read_T_18; // @[package.scala:16:47]
assign _s2_read_T_18 = _GEN_72; // @[package.scala:16:47]
wire _s2_write_T_16; // @[package.scala:16:47]
assign _s2_write_T_16 = _GEN_72; // @[package.scala:16:47]
wire _r_c_cat_T_16; // @[package.scala:16:47]
assign _r_c_cat_T_16 = _GEN_72; // @[package.scala:16:47]
wire _r_c_cat_T_39; // @[package.scala:16:47]
assign _r_c_cat_T_39 = _GEN_72; // @[package.scala:16:47]
wire _atomics_T_16; // @[DCache.scala:587:81]
assign _atomics_T_16 = _GEN_72; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_16; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_16 = _GEN_72; // @[package.scala:16:47]
wire _metaArb_io_in_3_bits_data_c_cat_T_39; // @[package.scala:16:47]
assign _metaArb_io_in_3_bits_data_c_cat_T_39 = _GEN_72; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_16; // @[package.scala:16:47]
assign _io_cpu_store_pending_T_16 = _GEN_72; // @[package.scala:16:47]
wire _s2_read_T_19 = _s2_read_T_14 | _s2_read_T_15; // @[package.scala:16:47, :81:59]
wire _s2_read_T_20 = _s2_read_T_19 | _s2_read_T_16; // @[package.scala:16:47, :81:59]
wire _s2_read_T_21 = _s2_read_T_20 | _s2_read_T_17; // @[package.scala:16:47, :81:59]
wire _s2_read_T_22 = _s2_read_T_21 | _s2_read_T_18; // @[package.scala:16:47, :81:59]
wire _s2_read_T_23 = _s2_read_T_13 | _s2_read_T_22; // @[package.scala:81:59]
assign s2_read = _s2_read_T_6 | _s2_read_T_23; // @[package.scala:81:59]
assign io_cpu_resp_bits_has_data_0 = s2_read; // @[DCache.scala:101:7]
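  // Write-command decode: the same comparison-and-OR chain style covers the M_*
  // store-type encodings (store, partial write, SC) plus the AMO group; s2_write
  // is then combined with s2_read into s2_readwrite.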
wire _GEN_73 = s2_req_cmd == 5'h1; // @[DCache.scala:339:19]
wire _s2_write_T; // @[Consts.scala:90:32]
assign _s2_write_T = _GEN_73; // @[Consts.scala:90:32]
wire _r_c_cat_T; // @[Consts.scala:90:32]
assign _r_c_cat_T = _GEN_73; // @[Consts.scala:90:32]
wire _r_c_cat_T_23; // @[Consts.scala:90:32]
assign _r_c_cat_T_23 = _GEN_73; // @[Consts.scala:90:32]
wire _metaArb_io_in_3_bits_data_c_cat_T; // @[Consts.scala:90:32]
assign _metaArb_io_in_3_bits_data_c_cat_T = _GEN_73; // @[Consts.scala:90:32]
wire _metaArb_io_in_3_bits_data_c_cat_T_23; // @[Consts.scala:90:32]
assign _metaArb_io_in_3_bits_data_c_cat_T_23 = _GEN_73; // @[Consts.scala:90:32]
wire _io_cpu_store_pending_T; // @[Consts.scala:90:32]
assign _io_cpu_store_pending_T = _GEN_73; // @[Consts.scala:90:32]
wire _GEN_74 = s2_req_cmd == 5'h11; // @[DCache.scala:339:19]
wire _s2_write_T_1; // @[Consts.scala:90:49]
assign _s2_write_T_1 = _GEN_74; // @[Consts.scala:90:49]
wire _r_c_cat_T_1; // @[Consts.scala:90:49]
assign _r_c_cat_T_1 = _GEN_74; // @[Consts.scala:90:49]
wire _r_c_cat_T_24; // @[Consts.scala:90:49]
assign _r_c_cat_T_24 = _GEN_74; // @[Consts.scala:90:49]
wire _tl_out_a_bits_T_4; // @[DCache.scala:610:20]
assign _tl_out_a_bits_T_4 = _GEN_74; // @[DCache.scala:610:20]
wire _uncachedReqs_0_cmd_T; // @[DCache.scala:637:49]
assign _uncachedReqs_0_cmd_T = _GEN_74; // @[DCache.scala:637:49]
wire _metaArb_io_in_3_bits_data_c_cat_T_1; // @[Consts.scala:90:49]
assign _metaArb_io_in_3_bits_data_c_cat_T_1 = _GEN_74; // @[Consts.scala:90:49]
wire _metaArb_io_in_3_bits_data_c_cat_T_24; // @[Consts.scala:90:49]
assign _metaArb_io_in_3_bits_data_c_cat_T_24 = _GEN_74; // @[Consts.scala:90:49]
wire _io_cpu_store_pending_T_1; // @[Consts.scala:90:49]
assign _io_cpu_store_pending_T_1 = _GEN_74; // @[Consts.scala:90:49]
wire _s2_write_T_2 = _s2_write_T | _s2_write_T_1; // @[Consts.scala:90:{32,42,49}]
wire _s2_write_T_4 = _s2_write_T_2 | _s2_write_T_3; // @[Consts.scala:90:{42,59,66}]
wire _s2_write_T_9 = _s2_write_T_5 | _s2_write_T_6; // @[package.scala:16:47, :81:59]
wire _s2_write_T_10 = _s2_write_T_9 | _s2_write_T_7; // @[package.scala:16:47, :81:59]
wire _s2_write_T_11 = _s2_write_T_10 | _s2_write_T_8; // @[package.scala:16:47, :81:59]
wire _s2_write_T_17 = _s2_write_T_12 | _s2_write_T_13; // @[package.scala:16:47, :81:59]
wire _s2_write_T_18 = _s2_write_T_17 | _s2_write_T_14; // @[package.scala:16:47, :81:59]
wire _s2_write_T_19 = _s2_write_T_18 | _s2_write_T_15; // @[package.scala:16:47, :81:59]
wire _s2_write_T_20 = _s2_write_T_19 | _s2_write_T_16; // @[package.scala:16:47, :81:59]
wire _s2_write_T_21 = _s2_write_T_11 | _s2_write_T_20; // @[package.scala:81:59]
wire s2_write = _s2_write_T_4 | _s2_write_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire s2_readwrite = s2_read | s2_write; // @[DCache.scala:354:30]
reg s2_flush_valid_pre_tag_ecc; // @[DCache.scala:355:43]
wire s2_flush_valid = s2_flush_valid_pre_tag_ecc; // @[DCache.scala:355:43, :363:51]
wire s1_meta_clk_en = _s1_meta_clk_en_T | s1_probe; // @[DCache.scala:183:25, :357:{44,62}]
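  // Per-way tag metadata captured into s2: each 26-bit register holds
  // {coh_state, tag}. No tag ECC appears to be configured here, so the
  // "corrected" metadata is a straight unpack of the registered bits.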
reg [25:0] s2_meta_corrected_r; // @[DCache.scala:361:61]
wire [25:0] _s2_meta_corrected_WIRE = s2_meta_corrected_r; // @[DCache.scala:361:{61,99}]
wire [1:0] _s2_meta_corrected_T_1; // @[DCache.scala:361:99]
wire [23:0] _s2_meta_corrected_T; // @[DCache.scala:361:99]
wire [1:0] s2_meta_corrected_0_coh_state; // @[DCache.scala:361:99]
wire [23:0] s2_meta_corrected_0_tag; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T = _s2_meta_corrected_WIRE[23:0]; // @[DCache.scala:361:99]
assign s2_meta_corrected_0_tag = _s2_meta_corrected_T; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T_1 = _s2_meta_corrected_WIRE[25:24]; // @[DCache.scala:361:99]
assign s2_meta_corrected_0_coh_state = _s2_meta_corrected_T_1; // @[DCache.scala:361:99]
reg [25:0] s2_meta_corrected_r_1; // @[DCache.scala:361:61]
wire [25:0] _s2_meta_corrected_WIRE_1 = s2_meta_corrected_r_1; // @[DCache.scala:361:{61,99}]
wire [1:0] _s2_meta_corrected_T_3; // @[DCache.scala:361:99]
wire [23:0] _s2_meta_corrected_T_2; // @[DCache.scala:361:99]
wire [1:0] s2_meta_corrected_1_coh_state; // @[DCache.scala:361:99]
wire [23:0] s2_meta_corrected_1_tag; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T_2 = _s2_meta_corrected_WIRE_1[23:0]; // @[DCache.scala:361:99]
assign s2_meta_corrected_1_tag = _s2_meta_corrected_T_2; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T_3 = _s2_meta_corrected_WIRE_1[25:24]; // @[DCache.scala:361:99]
assign s2_meta_corrected_1_coh_state = _s2_meta_corrected_T_3; // @[DCache.scala:361:99]
reg [25:0] s2_meta_corrected_r_2; // @[DCache.scala:361:61]
wire [25:0] _s2_meta_corrected_WIRE_2 = s2_meta_corrected_r_2; // @[DCache.scala:361:{61,99}]
wire [1:0] _s2_meta_corrected_T_5; // @[DCache.scala:361:99]
wire [23:0] _s2_meta_corrected_T_4; // @[DCache.scala:361:99]
wire [1:0] s2_meta_corrected_2_coh_state; // @[DCache.scala:361:99]
wire [23:0] s2_meta_corrected_2_tag; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T_4 = _s2_meta_corrected_WIRE_2[23:0]; // @[DCache.scala:361:99]
assign s2_meta_corrected_2_tag = _s2_meta_corrected_T_4; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T_5 = _s2_meta_corrected_WIRE_2[25:24]; // @[DCache.scala:361:99]
assign s2_meta_corrected_2_coh_state = _s2_meta_corrected_T_5; // @[DCache.scala:361:99]
reg [25:0] s2_meta_corrected_r_3; // @[DCache.scala:361:61]
wire [25:0] _s2_meta_corrected_WIRE_3 = s2_meta_corrected_r_3; // @[DCache.scala:361:{61,99}]
wire [1:0] _s2_meta_corrected_T_7; // @[DCache.scala:361:99]
wire [23:0] _s2_meta_corrected_T_6; // @[DCache.scala:361:99]
wire [1:0] _s2_first_meta_corrected_T_4_coh_state = s2_meta_corrected_3_coh_state; // @[Mux.scala:50:70]
wire [23:0] _s2_first_meta_corrected_T_4_tag = s2_meta_corrected_3_tag; // @[Mux.scala:50:70]
assign _s2_meta_corrected_T_6 = _s2_meta_corrected_WIRE_3[23:0]; // @[DCache.scala:361:99]
assign s2_meta_corrected_3_tag = _s2_meta_corrected_T_6; // @[DCache.scala:361:99]
assign _s2_meta_corrected_T_7 = _s2_meta_corrected_WIRE_3[25:24]; // @[DCache.scala:361:99]
assign s2_meta_corrected_3_coh_state = _s2_meta_corrected_T_7; // @[DCache.scala:361:99]
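  // s2 data selection: a Mux1H-style AND/OR reduction over the one-hot
  // s1_data_way picks which stage-1 64-bit data word is registered into s2_data.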
wire _s2_data_en_T = s1_valid | inWriteback; // @[package.scala:81:59]
wire s2_data_en = _s2_data_en_T | io_cpu_replay_next_0; // @[DCache.scala:101:7, :366:{23,38}]
wire s2_data_word_en = inWriteback | _s2_data_word_en_T; // @[package.scala:81:59]
wire _s2_data_s1_word_en_T = ~io_cpu_replay_next_0; // @[DCache.scala:101:7, :377:28]
wire s2_data_s1_word_en = ~_s2_data_s1_word_en_T | s2_data_word_en; // @[DCache.scala:367:22, :377:{27,28}]
wire _s2_data_T = s2_data_s1_word_en; // @[DCache.scala:377:27, :379:39]
wire [4:0] _s2_data_T_1 = _s2_data_T ? s1_data_way : 5'h0; // @[DCache.scala:323:32, :379:{28,39}]
wire _s2_data_T_2 = _s2_data_T_1[0]; // @[Mux.scala:32:36]
wire _s2_data_T_3 = _s2_data_T_1[1]; // @[Mux.scala:32:36]
wire _s2_data_T_4 = _s2_data_T_1[2]; // @[Mux.scala:32:36]
wire _s2_data_T_5 = _s2_data_T_1[3]; // @[Mux.scala:32:36]
wire _s2_data_T_6 = _s2_data_T_1[4]; // @[Mux.scala:32:36]
wire [63:0] _s2_data_T_7 = _s2_data_T_2 ? s2_data_s1_way_words_0_0 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_T_8 = _s2_data_T_3 ? s2_data_s1_way_words_1_0 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_T_9 = _s2_data_T_4 ? s2_data_s1_way_words_2_0 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_T_10 = _s2_data_T_5 ? s2_data_s1_way_words_3_0 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_T_11 = _s2_data_T_6 ? s2_data_s1_way_words_4_0 : 64'h0; // @[Mux.scala:30:73, :32:36]
wire [63:0] _s2_data_T_12 = _s2_data_T_7 | _s2_data_T_8; // @[Mux.scala:30:73]
wire [63:0] _s2_data_T_13 = _s2_data_T_12 | _s2_data_T_9; // @[Mux.scala:30:73]
wire [63:0] _s2_data_T_14 = _s2_data_T_13 | _s2_data_T_10; // @[Mux.scala:30:73]
wire [63:0] _s2_data_T_15 = _s2_data_T_14 | _s2_data_T_11; // @[Mux.scala:30:73]
wire [63:0] _s2_data_WIRE = _s2_data_T_15; // @[Mux.scala:30:73]
reg [63:0] s2_data; // @[DCache.scala:379:18]
reg [3:0] s2_probe_way; // @[DCache.scala:383:31]
reg [1:0] s2_probe_state_state; // @[DCache.scala:384:33]
reg [3:0] s2_hit_way; // @[DCache.scala:385:29]
reg [1:0] s2_hit_state_state; // @[DCache.scala:386:31]
wire s2_hit_valid = |s2_hit_state_state; // @[Metadata.scala:50:45]
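  // r_c categorizes the s2 command for the coherence lookup: the upper bit is the
  // Consts.scala "is write" test and the lower bit the "has write intent" test
  // (write, prefetch-with-intent, or LR), again expanded into comparison chains.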
wire _r_c_cat_T_2 = _r_c_cat_T | _r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _r_c_cat_T_4 = _r_c_cat_T_2 | _r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _r_c_cat_T_9 = _r_c_cat_T_5 | _r_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_10 = _r_c_cat_T_9 | _r_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_11 = _r_c_cat_T_10 | _r_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_17 = _r_c_cat_T_12 | _r_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_18 = _r_c_cat_T_17 | _r_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_19 = _r_c_cat_T_18 | _r_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_20 = _r_c_cat_T_19 | _r_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_21 = _r_c_cat_T_11 | _r_c_cat_T_20; // @[package.scala:81:59]
wire _r_c_cat_T_22 = _r_c_cat_T_4 | _r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _r_c_cat_T_25 = _r_c_cat_T_23 | _r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _r_c_cat_T_27 = _r_c_cat_T_25 | _r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _r_c_cat_T_32 = _r_c_cat_T_28 | _r_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_33 = _r_c_cat_T_32 | _r_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_34 = _r_c_cat_T_33 | _r_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_40 = _r_c_cat_T_35 | _r_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_41 = _r_c_cat_T_40 | _r_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_42 = _r_c_cat_T_41 | _r_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_43 = _r_c_cat_T_42 | _r_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_44 = _r_c_cat_T_34 | _r_c_cat_T_43; // @[package.scala:81:59]
wire _r_c_cat_T_45 = _r_c_cat_T_27 | _r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _GEN_75 = s2_req_cmd == 5'h3; // @[DCache.scala:339:19]
wire _r_c_cat_T_46; // @[Consts.scala:91:54]
assign _r_c_cat_T_46 = _GEN_75; // @[Consts.scala:91:54]
wire _metaArb_io_in_3_bits_data_c_cat_T_46; // @[Consts.scala:91:54]
assign _metaArb_io_in_3_bits_data_c_cat_T_46 = _GEN_75; // @[Consts.scala:91:54]
wire _r_c_cat_T_47 = _r_c_cat_T_45 | _r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _r_c_cat_T_49 = _r_c_cat_T_47 | _r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] r_c = {_r_c_cat_T_22, _r_c_cat_T_49}; // @[Metadata.scala:29:18]
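  // Hit / permission-upgrade lookup (ClientMetadata.onAccess style, inferred from
  // the Metadata.scala/Misc.scala annotations): {r_c, current line state} indexes
  // a priority-mux chain yielding s2_hit and the promoted hit state that is
  // written back through metaArb input 2 when the state must grow.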
wire [3:0] _r_T = {r_c, s2_hit_state_state}; // @[Metadata.scala:29:18, :58:19]
wire _r_T_25 = _r_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _r_T_27 = {1'h0, _r_T_25}; // @[Misc.scala:35:36, :49:20]
wire _r_T_28 = _r_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _r_T_30 = _r_T_28 ? 2'h2 : _r_T_27; // @[Misc.scala:35:36, :49:20]
wire _r_T_31 = _r_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _r_T_33 = _r_T_31 ? 2'h1 : _r_T_30; // @[Misc.scala:35:36, :49:20]
wire _r_T_34 = _r_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _r_T_36 = _r_T_34 ? 2'h2 : _r_T_33; // @[Misc.scala:35:36, :49:20]
wire _r_T_37 = _r_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _r_T_39 = _r_T_37 ? 2'h0 : _r_T_36; // @[Misc.scala:35:36, :49:20]
wire _r_T_40 = _r_T == 4'hE; // @[Misc.scala:49:20]
wire _r_T_41 = _r_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_42 = _r_T_40 ? 2'h3 : _r_T_39; // @[Misc.scala:35:36, :49:20]
wire _r_T_43 = &_r_T; // @[Misc.scala:49:20]
wire _r_T_44 = _r_T_43 | _r_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_45 = _r_T_43 ? 2'h3 : _r_T_42; // @[Misc.scala:35:36, :49:20]
wire _r_T_46 = _r_T == 4'h6; // @[Misc.scala:49:20]
wire _r_T_47 = _r_T_46 | _r_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_48 = _r_T_46 ? 2'h2 : _r_T_45; // @[Misc.scala:35:36, :49:20]
wire _r_T_49 = _r_T == 4'h7; // @[Misc.scala:49:20]
wire _r_T_50 = _r_T_49 | _r_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_51 = _r_T_49 ? 2'h3 : _r_T_48; // @[Misc.scala:35:36, :49:20]
wire _r_T_52 = _r_T == 4'h1; // @[Misc.scala:49:20]
wire _r_T_53 = _r_T_52 | _r_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_54 = _r_T_52 ? 2'h1 : _r_T_51; // @[Misc.scala:35:36, :49:20]
wire _r_T_55 = _r_T == 4'h2; // @[Misc.scala:49:20]
wire _r_T_56 = _r_T_55 | _r_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_57 = _r_T_55 ? 2'h2 : _r_T_54; // @[Misc.scala:35:36, :49:20]
wire _r_T_58 = _r_T == 4'h3; // @[Misc.scala:49:20]
wire s2_hit = _r_T_58 | _r_T_56; // @[Misc.scala:35:9, :49:20]
wire [1:0] s2_grow_param = _r_T_58 ? 2'h3 : _r_T_57; // @[Misc.scala:35:36, :49:20]
wire [1:0] s2_new_hit_state_state = s2_grow_param; // @[Misc.scala:35:36]
wire [1:0] metaArb_io_in_2_bits_data_meta_coh_state = s2_new_hit_state_state; // @[Metadata.scala:160:20]
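  // Data "correction": with the identity (no data ECC) code, the corrected and
  // uncorrected views are both plain re-concatenations of s2_data in 16-bit
  // granules; the corrected word also feeds the outgoing release data.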
wire [15:0] s2_data_corrected_lo_lo = s2_data[15:0]; // @[package.scala:45:27]
wire [15:0] s2_data_uncorrected_lo_lo = s2_data[15:0]; // @[package.scala:45:27]
wire [15:0] s2_data_corrected_lo_hi = s2_data[31:16]; // @[package.scala:45:27]
wire [15:0] s2_data_uncorrected_lo_hi = s2_data[31:16]; // @[package.scala:45:27]
wire [31:0] s2_data_corrected_lo = {s2_data_corrected_lo_hi, s2_data_corrected_lo_lo}; // @[package.scala:45:27]
wire [15:0] s2_data_corrected_hi_lo = s2_data[47:32]; // @[package.scala:45:27]
wire [15:0] s2_data_uncorrected_hi_lo = s2_data[47:32]; // @[package.scala:45:27]
wire [15:0] s2_data_corrected_hi_hi = s2_data[63:48]; // @[package.scala:45:27]
wire [15:0] s2_data_uncorrected_hi_hi = s2_data[63:48]; // @[package.scala:45:27]
wire [31:0] s2_data_corrected_hi = {s2_data_corrected_hi_hi, s2_data_corrected_hi_lo}; // @[package.scala:45:27]
assign s2_data_corrected = {s2_data_corrected_hi, s2_data_corrected_lo}; // @[package.scala:45:27]
assign nodeOut_c_bits_data = s2_data_corrected; // @[package.scala:45:27]
wire [63:0] s2_data_word_corrected = s2_data_corrected; // @[package.scala:45:27]
wire [31:0] s2_data_uncorrected_lo = {s2_data_uncorrected_lo_hi, s2_data_uncorrected_lo_lo}; // @[package.scala:45:27]
wire [31:0] s2_data_uncorrected_hi = {s2_data_uncorrected_hi_hi, s2_data_uncorrected_hi_lo}; // @[package.scala:45:27]
wire [63:0] s2_data_uncorrected = {s2_data_uncorrected_hi, s2_data_uncorrected_lo}; // @[package.scala:45:27]
assign s2_data_word = s2_data_uncorrected; // @[package.scala:45:27]
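  // s2 outcome classification: valid hit, flush-line hit, cacheable miss,
  // uncached access (PMA non-cacheable, or a no-allocate request missing the
  // cache), and the derived victimize / uncached-pending conditions.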
wire s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = _s2_valid_hit_maybe_flush_pre_data_ecc_and_waw_T_1 & s2_hit; // @[Misc.scala:35:9]
wire _s2_valid_hit_pre_data_ecc_and_waw_T = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw & s2_readwrite; // @[DCache.scala:354:30, :397:89, :418:89]
wire s2_valid_hit_pre_data_ecc_and_waw = _s2_valid_hit_pre_data_ecc_and_waw_T; // @[DCache.scala:418:{89,105}]
wire s2_valid_hit_pre_data_ecc = s2_valid_hit_pre_data_ecc_and_waw; // @[DCache.scala:418:105, :420:69]
wire s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw & s2_cmd_flush_line; // @[DCache.scala:341:54, :397:89, :419:75]
wire _s2_victim_tag_T = s2_valid_flush_line; // @[DCache.scala:419:75, :433:47]
wire s2_valid_hit = s2_valid_hit_pre_data_ecc; // @[DCache.scala:420:69, :422:48]
wire _s2_valid_miss_T = s2_valid_masked & s2_readwrite; // @[DCache.scala:337:42, :354:30, :423:39]
wire _s2_valid_miss_T_2 = _s2_valid_miss_T; // @[DCache.scala:423:{39,55}]
wire _s2_valid_miss_T_3 = ~s2_hit; // @[Misc.scala:35:9]
wire s2_valid_miss = _s2_valid_miss_T_2 & _s2_valid_miss_T_3; // @[DCache.scala:423:{55,73,76}]
wire _s2_uncached_T = ~s2_pma_cacheable; // @[DCache.scala:343:19, :424:21]
wire _s2_uncached_T_1 = ~s2_pma_must_alloc; // @[DCache.scala:343:19, :424:61]
wire _s2_uncached_T_2 = s2_req_no_alloc & _s2_uncached_T_1; // @[DCache.scala:339:19, :424:{58,61}]
wire _s2_uncached_T_3 = ~s2_hit_valid; // @[Metadata.scala:50:45]
wire _s2_uncached_T_4 = _s2_uncached_T_2 & _s2_uncached_T_3; // @[DCache.scala:424:{58,80,83}]
wire s2_uncached = _s2_uncached_T | _s2_uncached_T_4; // @[DCache.scala:424:{21,39,80}]
wire _s2_valid_cached_miss_T = ~s2_uncached; // @[DCache.scala:424:39, :425:47]
wire _s2_valid_cached_miss_T_1 = s2_valid_miss & _s2_valid_cached_miss_T; // @[DCache.scala:423:73, :425:{44,47}]
wire _s2_valid_cached_miss_T_3 = ~_s2_valid_cached_miss_T_2; // @[DCache.scala:425:{63,88}]
wire s2_valid_cached_miss = _s2_valid_cached_miss_T_1 & _s2_valid_cached_miss_T_3; // @[DCache.scala:425:{44,60,63}]
wire _s2_want_victimize_T = s2_valid_cached_miss | s2_valid_flush_line; // @[DCache.scala:419:75, :425:60, :427:77]
wire _s2_want_victimize_T_1 = _s2_want_victimize_T; // @[DCache.scala:427:{77,100}]
wire _s2_want_victimize_T_2 = _s2_want_victimize_T_1 | s2_flush_valid; // @[DCache.scala:363:51, :427:{100,123}]
wire s2_want_victimize = _s2_want_victimize_T_2; // @[DCache.scala:427:{52,123}]
wire s2_victimize = s2_want_victimize; // @[DCache.scala:427:52, :429:40]
wire _s2_cannot_victimize_T = ~s2_flush_valid; // @[DCache.scala:363:51, :428:29]
wire _s2_valid_uncached_pending_T = s2_valid_miss & s2_uncached; // @[DCache.scala:423:73, :424:39, :430:49]
wire _s2_valid_uncached_pending_T_2 = ~_s2_valid_uncached_pending_T_1; // @[DCache.scala:430:{67,92}]
wire s2_valid_uncached_pending = _s2_valid_uncached_pending_T & _s2_valid_uncached_pending_T_2; // @[DCache.scala:430:{49,64,67}]
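  // Victim selection: the registered replacement way is decoded one-hot into
  // s2_victim_way (the hit way is used instead on a hit), and the victim tag and
  // coherence state are gathered through one-hot muxes over the four ways; a
  // flush-line request substitutes the request address as the tag.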
reg [1:0] s2_victim_way_r; // @[DCache.scala:431:41]
wire [3:0] s2_victim_way = 4'h1 << s2_victim_way_r; // @[OneHot.scala:58:35]
assign s2_victim_or_hit_way = s2_hit_valid ? s2_hit_way : s2_victim_way; // @[OneHot.scala:58:35]
assign metaArb_io_in_2_bits_way_en = s2_victim_or_hit_way; // @[DCache.scala:135:28, :432:33]
wire [23:0] _s2_victim_tag_T_1 = s2_req_addr[31:8]; // @[DCache.scala:339:19, :433:82]
wire _s2_victim_tag_T_2 = s2_victim_way[0]; // @[OneHot.scala:58:35]
wire _s2_victim_state_T = s2_victim_way[0]; // @[OneHot.scala:58:35]
wire _s2_victim_tag_T_3 = s2_victim_way[1]; // @[OneHot.scala:58:35]
wire _s2_victim_state_T_1 = s2_victim_way[1]; // @[OneHot.scala:58:35]
wire _s2_victim_tag_T_4 = s2_victim_way[2]; // @[OneHot.scala:58:35]
wire _s2_victim_state_T_2 = s2_victim_way[2]; // @[OneHot.scala:58:35]
wire _s2_victim_tag_T_5 = s2_victim_way[3]; // @[OneHot.scala:58:35]
wire _s2_victim_state_T_3 = s2_victim_way[3]; // @[OneHot.scala:58:35]
wire [1:0] _s2_victim_tag_WIRE_2_state; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_tag_WIRE_1; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_tag_T_6 = _s2_victim_tag_T_2 ? s2_meta_corrected_0_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_tag_T_7 = _s2_victim_tag_T_3 ? s2_meta_corrected_1_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_tag_T_8 = _s2_victim_tag_T_4 ? s2_meta_corrected_2_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_tag_T_9 = _s2_victim_tag_T_5 ? s2_meta_corrected_3_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_tag_T_10 = _s2_victim_tag_T_6 | _s2_victim_tag_T_7; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_tag_T_11 = _s2_victim_tag_T_10 | _s2_victim_tag_T_8; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_tag_T_12 = _s2_victim_tag_T_11 | _s2_victim_tag_T_9; // @[Mux.scala:30:73]
assign _s2_victim_tag_WIRE_1 = _s2_victim_tag_T_12; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_tag_WIRE_tag = _s2_victim_tag_WIRE_1; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_tag_WIRE_3; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_tag_WIRE_coh_state = _s2_victim_tag_WIRE_2_state; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_tag_T_13 = _s2_victim_tag_T_2 ? s2_meta_corrected_0_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_tag_T_14 = _s2_victim_tag_T_3 ? s2_meta_corrected_1_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_tag_T_15 = _s2_victim_tag_T_4 ? s2_meta_corrected_2_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_tag_T_16 = _s2_victim_tag_T_5 ? s2_meta_corrected_3_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_tag_T_17 = _s2_victim_tag_T_13 | _s2_victim_tag_T_14; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_tag_T_18 = _s2_victim_tag_T_17 | _s2_victim_tag_T_15; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_tag_T_19 = _s2_victim_tag_T_18 | _s2_victim_tag_T_16; // @[Mux.scala:30:73]
assign _s2_victim_tag_WIRE_3 = _s2_victim_tag_T_19; // @[Mux.scala:30:73]
assign _s2_victim_tag_WIRE_2_state = _s2_victim_tag_WIRE_3; // @[Mux.scala:30:73]
wire [23:0] s2_victim_tag = _s2_victim_tag_T ? _s2_victim_tag_T_1 : _s2_victim_tag_WIRE_tag; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_state_WIRE_2_state; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_state_WIRE_1; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_state_T_4 = _s2_victim_state_T ? s2_meta_corrected_0_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_state_T_5 = _s2_victim_state_T_1 ? s2_meta_corrected_1_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_state_T_6 = _s2_victim_state_T_2 ? s2_meta_corrected_2_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_state_T_7 = _s2_victim_state_T_3 ? s2_meta_corrected_3_tag : 24'h0; // @[Mux.scala:30:73, :32:36]
wire [23:0] _s2_victim_state_T_8 = _s2_victim_state_T_4 | _s2_victim_state_T_5; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_state_T_9 = _s2_victim_state_T_8 | _s2_victim_state_T_6; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_state_T_10 = _s2_victim_state_T_9 | _s2_victim_state_T_7; // @[Mux.scala:30:73]
assign _s2_victim_state_WIRE_1 = _s2_victim_state_T_10; // @[Mux.scala:30:73]
wire [23:0] _s2_victim_state_WIRE_tag = _s2_victim_state_WIRE_1; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_state_WIRE_3; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_state_WIRE_coh_state = _s2_victim_state_WIRE_2_state; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_state_T_11 = _s2_victim_state_T ? s2_meta_corrected_0_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_state_T_12 = _s2_victim_state_T_1 ? s2_meta_corrected_1_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_state_T_13 = _s2_victim_state_T_2 ? s2_meta_corrected_2_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_state_T_14 = _s2_victim_state_T_3 ? s2_meta_corrected_3_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _s2_victim_state_T_15 = _s2_victim_state_T_11 | _s2_victim_state_T_12; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_state_T_16 = _s2_victim_state_T_15 | _s2_victim_state_T_13; // @[Mux.scala:30:73]
wire [1:0] _s2_victim_state_T_17 = _s2_victim_state_T_16 | _s2_victim_state_T_14; // @[Mux.scala:30:73]
assign _s2_victim_state_WIRE_3 = _s2_victim_state_T_17; // @[Mux.scala:30:73]
assign _s2_victim_state_WIRE_2_state = _s2_victim_state_WIRE_3; // @[Mux.scala:30:73]
wire [1:0] s2_victim_state_state = s2_hit_valid ? s2_hit_state_state : _s2_victim_state_WIRE_coh_state; // @[Mux.scala:30:73]
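  // Probe response lookup (onProbe style): {probe param, probed line state}
  // determines whether dirty data must accompany the ProbeAck (s2_prb_ack_data),
  // the report param, and the line's next coherence state (probeNewCoh).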
wire [3:0] _r_T_59 = {probe_bits_param, s2_probe_state_state}; // @[Metadata.scala:120:19]
wire _r_T_72 = _r_T_59 == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _r_T_74 = _r_T_72 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _r_T_76 = _r_T_59 == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _r_T_78 = _r_T_76 ? 3'h2 : _r_T_74; // @[Misc.scala:38:36, :56:20]
wire _r_T_80 = _r_T_59 == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _r_T_82 = _r_T_80 ? 3'h1 : _r_T_78; // @[Misc.scala:38:36, :56:20]
wire _r_T_84 = _r_T_59 == 4'hB; // @[Misc.scala:56:20]
wire _r_T_85 = _r_T_84; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_86 = _r_T_84 ? 3'h1 : _r_T_82; // @[Misc.scala:38:36, :56:20]
wire _r_T_88 = _r_T_59 == 4'h4; // @[Misc.scala:56:20]
wire _r_T_89 = ~_r_T_88 & _r_T_85; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_90 = _r_T_88 ? 3'h5 : _r_T_86; // @[Misc.scala:38:36, :56:20]
wire _r_T_92 = _r_T_59 == 4'h5; // @[Misc.scala:56:20]
wire _r_T_93 = ~_r_T_92 & _r_T_89; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_94 = _r_T_92 ? 3'h4 : _r_T_90; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_95 = {1'h0, _r_T_92}; // @[Misc.scala:38:63, :56:20]
wire _r_T_96 = _r_T_59 == 4'h6; // @[Misc.scala:56:20]
wire _r_T_97 = ~_r_T_96 & _r_T_93; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_98 = _r_T_96 ? 3'h0 : _r_T_94; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_99 = _r_T_96 ? 2'h1 : _r_T_95; // @[Misc.scala:38:63, :56:20]
wire _r_T_100 = _r_T_59 == 4'h7; // @[Misc.scala:56:20]
wire _r_T_101 = _r_T_100 | _r_T_97; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_102 = _r_T_100 ? 3'h0 : _r_T_98; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_103 = _r_T_100 ? 2'h1 : _r_T_99; // @[Misc.scala:38:63, :56:20]
wire _r_T_104 = _r_T_59 == 4'h0; // @[Misc.scala:56:20]
wire _r_T_105 = ~_r_T_104 & _r_T_101; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_106 = _r_T_104 ? 3'h5 : _r_T_102; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_107 = _r_T_104 ? 2'h0 : _r_T_103; // @[Misc.scala:38:63, :56:20]
wire _r_T_108 = _r_T_59 == 4'h1; // @[Misc.scala:56:20]
wire _r_T_109 = ~_r_T_108 & _r_T_105; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_110 = _r_T_108 ? 3'h4 : _r_T_106; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_111 = _r_T_108 ? 2'h1 : _r_T_107; // @[Misc.scala:38:63, :56:20]
wire _r_T_112 = _r_T_59 == 4'h2; // @[Misc.scala:56:20]
wire _r_T_113 = ~_r_T_112 & _r_T_109; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_114 = _r_T_112 ? 3'h3 : _r_T_110; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_115 = _r_T_112 ? 2'h2 : _r_T_111; // @[Misc.scala:38:63, :56:20]
wire _r_T_116 = _r_T_59 == 4'h3; // @[Misc.scala:56:20]
wire s2_prb_ack_data = _r_T_116 | _r_T_113; // @[Misc.scala:38:9, :56:20]
wire [2:0] s2_report_param = _r_T_116 ? 3'h3 : _r_T_114; // @[Misc.scala:38:36, :56:20]
wire [2:0] cleanReleaseMessage_param = s2_report_param; // @[Misc.scala:38:36]
wire [2:0] dirtyReleaseMessage_param = s2_report_param; // @[Misc.scala:38:36]
wire [1:0] r_3 = _r_T_116 ? 2'h2 : _r_T_115; // @[Misc.scala:38:63, :56:20]
wire [1:0] probeNewCoh_state = r_3; // @[Misc.scala:38:63]
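  // Voluntary release lookup: the victim line's state is shrunk toward N
  // (the leading 2'h2 param), yielding s2_victim_dirty, the Release shrink
  // param driven onto the C channel, and the post-release state.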
wire [3:0] _r_T_123 = {2'h2, s2_victim_state_state}; // @[Metadata.scala:120:19]
wire _r_T_136 = _r_T_123 == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _r_T_138 = _r_T_136 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _r_T_140 = _r_T_123 == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _r_T_142 = _r_T_140 ? 3'h2 : _r_T_138; // @[Misc.scala:38:36, :56:20]
wire _r_T_144 = _r_T_123 == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _r_T_146 = _r_T_144 ? 3'h1 : _r_T_142; // @[Misc.scala:38:36, :56:20]
wire _r_T_148 = _r_T_123 == 4'hB; // @[Misc.scala:56:20]
wire _r_T_149 = _r_T_148; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_150 = _r_T_148 ? 3'h1 : _r_T_146; // @[Misc.scala:38:36, :56:20]
wire _r_T_152 = _r_T_123 == 4'h4; // @[Misc.scala:56:20]
wire _r_T_153 = ~_r_T_152 & _r_T_149; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_154 = _r_T_152 ? 3'h5 : _r_T_150; // @[Misc.scala:38:36, :56:20]
wire _r_T_156 = _r_T_123 == 4'h5; // @[Misc.scala:56:20]
wire _r_T_157 = ~_r_T_156 & _r_T_153; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_158 = _r_T_156 ? 3'h4 : _r_T_154; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_159 = {1'h0, _r_T_156}; // @[Misc.scala:38:63, :56:20]
wire _r_T_160 = _r_T_123 == 4'h6; // @[Misc.scala:56:20]
wire _r_T_161 = ~_r_T_160 & _r_T_157; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_162 = _r_T_160 ? 3'h0 : _r_T_158; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_163 = _r_T_160 ? 2'h1 : _r_T_159; // @[Misc.scala:38:63, :56:20]
wire _r_T_164 = _r_T_123 == 4'h7; // @[Misc.scala:56:20]
wire _r_T_165 = _r_T_164 | _r_T_161; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_166 = _r_T_164 ? 3'h0 : _r_T_162; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_167 = _r_T_164 ? 2'h1 : _r_T_163; // @[Misc.scala:38:63, :56:20]
wire _r_T_168 = _r_T_123 == 4'h0; // @[Misc.scala:56:20]
wire _r_T_169 = ~_r_T_168 & _r_T_165; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_170 = _r_T_168 ? 3'h5 : _r_T_166; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_171 = _r_T_168 ? 2'h0 : _r_T_167; // @[Misc.scala:38:63, :56:20]
wire _r_T_172 = _r_T_123 == 4'h1; // @[Misc.scala:56:20]
wire _r_T_173 = ~_r_T_172 & _r_T_169; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_174 = _r_T_172 ? 3'h4 : _r_T_170; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_175 = _r_T_172 ? 2'h1 : _r_T_171; // @[Misc.scala:38:63, :56:20]
wire _r_T_176 = _r_T_123 == 4'h2; // @[Misc.scala:56:20]
wire _r_T_177 = ~_r_T_176 & _r_T_173; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_178 = _r_T_176 ? 3'h3 : _r_T_174; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_179 = _r_T_176 ? 2'h2 : _r_T_175; // @[Misc.scala:38:63, :56:20]
wire _r_T_180 = _r_T_123 == 4'h3; // @[Misc.scala:56:20]
wire s2_victim_dirty = _r_T_180 | _r_T_177; // @[Misc.scala:38:9, :56:20]
wire [2:0] s2_shrink_param = _r_T_180 ? 3'h3 : _r_T_178; // @[Misc.scala:38:36, :56:20]
wire [2:0] nodeOut_c_bits_c_param = s2_shrink_param; // @[Misc.scala:38:36]
wire [2:0] nodeOut_c_bits_c_1_param = s2_shrink_param; // @[Misc.scala:38:36]
wire [1:0] r_3_1 = _r_T_180 ? 2'h2 : _r_T_179; // @[Misc.scala:38:63, :56:20]
wire [1:0] voluntaryNewCoh_state = r_3_1; // @[Misc.scala:38:63]
wire _s2_update_meta_T = s2_hit_state_state == s2_new_hit_state_state; // @[Metadata.scala:46:46, :160:20]
wire s2_update_meta = ~_s2_update_meta_T; // @[Metadata.scala:46:46, :47:40]
wire s2_dont_nack_uncached = s2_valid_uncached_pending & tl_out_a_ready; // @[DCache.scala:159:22, :430:64, :440:57]
wire _s2_dont_nack_misc_T_7 = ~s2_hit; // @[Misc.scala:35:9]
wire _s2_dont_nack_misc_T_10 = s2_req_cmd == 5'h17; // @[DCache.scala:339:19, :444:17]
wire _s2_dont_nack_misc_T_11 = _s2_dont_nack_misc_T_10; // @[DCache.scala:443:55, :444:17]
wire s2_dont_nack_misc = _s2_dont_nack_misc_T_1 & _s2_dont_nack_misc_T_11; // @[DCache.scala:441:{43,61}, :443:55]
wire _io_cpu_s2_nack_T = ~s2_dont_nack_uncached; // @[DCache.scala:440:57, :445:41]
wire _io_cpu_s2_nack_T_1 = s2_valid_no_xcpt & _io_cpu_s2_nack_T; // @[DCache.scala:332:35, :445:{38,41}]
wire _io_cpu_s2_nack_T_2 = ~s2_dont_nack_misc; // @[DCache.scala:441:61, :445:67]
wire _io_cpu_s2_nack_T_3 = _io_cpu_s2_nack_T_1 & _io_cpu_s2_nack_T_2; // @[DCache.scala:445:{38,64,67}]
wire _io_cpu_s2_nack_T_4 = ~s2_valid_hit; // @[DCache.scala:422:48, :445:89]
assign _io_cpu_s2_nack_T_5 = _io_cpu_s2_nack_T_3 & _io_cpu_s2_nack_T_4; // @[DCache.scala:445:{64,86,89}]
assign io_cpu_s2_nack_0 = _io_cpu_s2_nack_T_5; // @[DCache.scala:101:7, :445:86]
assign _metaArb_io_in_2_valid_T = s2_valid_hit_pre_data_ecc_and_waw & s2_update_meta; // @[Metadata.scala:47:40]
wire _T_40 = io_cpu_s2_nack_0 | _metaArb_io_in_2_valid_T; // @[DCache.scala:101:7, :446:24, :462:63]
wire [1:0] _s2_first_meta_corrected_T_5_coh_state = _s2_first_meta_corrected_T_4_coh_state; // @[Mux.scala:50:70]
wire [23:0] _s2_first_meta_corrected_T_5_tag = _s2_first_meta_corrected_T_4_tag; // @[Mux.scala:50:70]
wire [1:0] s2_first_meta_corrected_coh_state = _s2_first_meta_corrected_T_5_coh_state; // @[Mux.scala:50:70]
wire [23:0] s2_first_meta_corrected_tag = _s2_first_meta_corrected_T_5_tag; // @[Mux.scala:50:70]
wire [1:0] metaArb_io_in_1_bits_data_new_meta_coh_state = s2_first_meta_corrected_coh_state; // @[Mux.scala:50:70]
wire [23:0] metaArb_io_in_1_bits_data_new_meta_tag = s2_first_meta_corrected_tag; // @[Mux.scala:50:70]
wire _metaArb_io_in_1_valid_T = s2_valid_masked | s2_flush_valid_pre_tag_ecc; // @[DCache.scala:337:42, :355:43, :450:63]
wire _metaArb_io_in_1_valid_T_1 = _metaArb_io_in_1_valid_T | s2_probe; // @[DCache.scala:333:25, :450:{63,93}]
wire [1:0] _metaArb_io_in_1_bits_idx_T = probe_bits_address[7:6]; // @[DCache.scala:184:29, :1200:47]
wire [1:0] _metaArb_io_in_6_bits_idx_T_1 = probe_bits_address[7:6]; // @[DCache.scala:184:29, :1200:47]
wire [1:0] _dataArb_io_in_2_bits_addr_T = probe_bits_address[7:6]; // @[DCache.scala:184:29, :1200:47]
assign _metaArb_io_in_4_bits_idx_T = probe_bits_address[7:6]; // @[DCache.scala:184:29, :1200:47]
wire [1:0] _metaArb_io_in_1_bits_idx_T_1 = s2_vaddr[7:6]; // @[DCache.scala:351:21, :453:76]
assign _metaArb_io_in_2_bits_idx_T = s2_vaddr[7:6]; // @[DCache.scala:351:21, :453:76, :465:40]
assign _metaArb_io_in_3_bits_idx_T = s2_vaddr[7:6]; // @[DCache.scala:351:21, :453:76, :744:40]
assign _metaArb_io_in_1_bits_idx_T_2 = s2_probe ? _metaArb_io_in_1_bits_idx_T : _metaArb_io_in_1_bits_idx_T_1; // @[DCache.scala:333:25, :453:{35,76}, :1200:47]
assign metaArb_io_in_1_bits_idx = _metaArb_io_in_1_bits_idx_T_2; // @[DCache.scala:135:28, :453:35]
wire [7:0] _metaArb_io_in_1_bits_addr_T_1 = {_metaArb_io_in_1_bits_idx_T_2, 6'h0}; // @[DCache.scala:453:35, :454:98]
assign _metaArb_io_in_1_bits_addr_T_2 = {_metaArb_io_in_1_bits_addr_T, _metaArb_io_in_1_bits_addr_T_1}; // @[DCache.scala:454:{36,58,98}]
assign metaArb_io_in_1_bits_addr = _metaArb_io_in_1_bits_addr_T_2; // @[DCache.scala:135:28, :454:36]
assign _metaArb_io_in_1_bits_data_T = {metaArb_io_in_1_bits_data_new_meta_coh_state, metaArb_io_in_1_bits_data_new_meta_tag}; // @[DCache.scala:456:31, :458:14]
assign metaArb_io_in_1_bits_data = _metaArb_io_in_1_bits_data_T; // @[DCache.scala:135:28, :458:14]
assign metaArb_io_in_2_valid = _metaArb_io_in_2_valid_T; // @[DCache.scala:135:28, :462:63]
assign metaArb_io_in_2_bits_idx = _metaArb_io_in_2_bits_idx_T; // @[DCache.scala:135:28, :465:40]
wire [7:0] _metaArb_io_in_2_bits_addr_T_1 = s2_vaddr[7:0]; // @[DCache.scala:351:21, :466:80]
wire [7:0] _metaArb_io_in_3_bits_addr_T_1 = s2_vaddr[7:0]; // @[DCache.scala:351:21, :466:80, :745:80]
assign _metaArb_io_in_2_bits_addr_T_2 = {_metaArb_io_in_2_bits_addr_T, _metaArb_io_in_2_bits_addr_T_1}; // @[DCache.scala:466:{36,58,80}]
assign metaArb_io_in_2_bits_addr = _metaArb_io_in_2_bits_addr_T_2; // @[DCache.scala:135:28, :466:36]
wire [31:0] _metaArb_io_in_2_bits_data_T = s2_req_addr[39:8]; // @[DCache.scala:339:19, :467:68]
wire [31:0] _metaArb_io_in_3_bits_data_T = s2_req_addr[39:8]; // @[DCache.scala:339:19, :467:68, :746:68]
wire [23:0] metaArb_io_in_2_bits_data_meta_tag; // @[HellaCache.scala:305:20]
assign metaArb_io_in_2_bits_data_meta_tag = _metaArb_io_in_2_bits_data_T[23:0]; // @[HellaCache.scala:305:20, :306:14]
assign _metaArb_io_in_2_bits_data_T_1 = {metaArb_io_in_2_bits_data_meta_coh_state, metaArb_io_in_2_bits_data_meta_tag}; // @[HellaCache.scala:305:20]
assign metaArb_io_in_2_bits_data = _metaArb_io_in_2_bits_data_T_1; // @[DCache.scala:135:28, :467:97]
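  // LR/SC reservation tracking: lrscCount is the reservation/backoff counter and
  // lrscAddr the reserved line address; an SC fails unless the reservation is
  // still live (lrscValid) and matches the request address.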
reg [4:0] lrscCount; // @[DCache.scala:472:26]
wire lrscValid = |(lrscCount[4:2]); // @[DCache.scala:472:26, :473:29]
wire _lrscBackingOff_T = |lrscCount; // @[DCache.scala:472:26, :474:34]
wire _lrscBackingOff_T_1 = ~lrscValid; // @[DCache.scala:473:29, :474:43]
wire lrscBackingOff = _lrscBackingOff_T & _lrscBackingOff_T_1; // @[DCache.scala:474:{34,40,43}]
reg [33:0] lrscAddr; // @[DCache.scala:475:21]
wire [33:0] _lrscAddrMatch_T = s2_req_addr[39:6]; // @[DCache.scala:339:19, :476:49]
wire [33:0] _lrscAddr_T = s2_req_addr[39:6]; // @[DCache.scala:339:19, :476:49, :480:29]
wire [33:0] _acquire_address_T = s2_req_addr[39:6]; // @[DCache.scala:339:19, :476:49, :578:38]
wire [33:0] _tl_out_a_bits_T_1 = s2_req_addr[39:6]; // @[DCache.scala:339:19, :476:49, :1210:39]
wire [33:0] _io_errors_bus_bits_T = s2_req_addr[39:6]; // @[DCache.scala:339:19, :476:49, :1130:58]
wire lrscAddrMatch = lrscAddr == _lrscAddrMatch_T; // @[DCache.scala:475:21, :476:{32,49}]
wire _s2_sc_fail_T = lrscValid & lrscAddrMatch; // @[DCache.scala:473:29, :476:32, :477:41]
wire _s2_sc_fail_T_1 = ~_s2_sc_fail_T; // @[DCache.scala:477:{29,41}]
wire [4:0] _lrscCount_T = s2_hit ? 5'h13 : 5'h0; // @[Misc.scala:35:9]
wire [5:0] _lrscCount_T_1 = {1'h0, lrscCount} - 6'h1; // @[DCache.scala:472:26, :482:51]
wire [4:0] _lrscCount_T_2 = _lrscCount_T_1[4:0]; // @[DCache.scala:482:51]
wire _s2_correct_T = ~any_pstore_valid; // @[DCache.scala:230:30, :487:37]
wire _s2_correct_T_2 = any_pstore_valid | s2_valid; // @[DCache.scala:230:30, :331:25, :487:84]
reg s2_correct_REG; // @[DCache.scala:487:66]
wire _s2_correct_T_3 = ~s2_correct_REG; // @[DCache.scala:487:{58,66}]
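  // pstore1: the first store-buffer stage captures command, address, data, way,
  // and byte mask when a write leaves s1 un-nacked. pstore1_rmw appears to flag
  // stores that must also read the line (AMOs, and sub-word writes under the
  // DCache.scala:1191 size test).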
wire _GEN_76 = s1_valid_not_nacked & s1_write; // @[DCache.scala:187:38, :492:63]
wire _pstore1_cmd_T; // @[DCache.scala:492:63]
assign _pstore1_cmd_T = _GEN_76; // @[DCache.scala:492:63]
wire _pstore1_addr_T; // @[DCache.scala:493:62]
assign _pstore1_addr_T = _GEN_76; // @[DCache.scala:492:63, :493:62]
wire _pstore1_data_T; // @[DCache.scala:494:73]
assign _pstore1_data_T = _GEN_76; // @[DCache.scala:492:63, :494:73]
wire _pstore1_way_T; // @[DCache.scala:495:63]
assign _pstore1_way_T = _GEN_76; // @[DCache.scala:492:63, :495:63]
wire _pstore1_mask_T; // @[DCache.scala:496:61]
assign _pstore1_mask_T = _GEN_76; // @[DCache.scala:492:63, :496:61]
wire _pstore1_rmw_T_53; // @[DCache.scala:498:84]
assign _pstore1_rmw_T_53 = _GEN_76; // @[DCache.scala:492:63, :498:84]
reg [4:0] pstore1_cmd; // @[DCache.scala:492:30]
reg [39:0] pstore1_addr; // @[DCache.scala:493:31]
wire [39:0] _pstore2_addr_T = pstore1_addr; // @[DCache.scala:493:31, :524:35]
reg [63:0] pstore1_data; // @[DCache.scala:494:31]
assign io_cpu_resp_bits_store_data_0 = pstore1_data; // @[DCache.scala:101:7, :494:31]
wire [63:0] pstore1_storegen_data = pstore1_data; // @[DCache.scala:494:31, :497:42]
wire [63:0] put_data = pstore1_data; // @[Edges.scala:480:17]
wire [63:0] putpartial_data = pstore1_data; // @[Edges.scala:500:17]
wire [63:0] atomics_a_data = pstore1_data; // @[Edges.scala:534:17]
wire [63:0] atomics_a_1_data = pstore1_data; // @[Edges.scala:534:17]
wire [63:0] atomics_a_2_data = pstore1_data; // @[Edges.scala:534:17]
wire [63:0] atomics_a_3_data = pstore1_data; // @[Edges.scala:534:17]
wire [63:0] atomics_a_4_data = pstore1_data; // @[Edges.scala:517:17]
wire [63:0] atomics_a_5_data = pstore1_data; // @[Edges.scala:517:17]
wire [63:0] atomics_a_6_data = pstore1_data; // @[Edges.scala:517:17]
wire [63:0] atomics_a_7_data = pstore1_data; // @[Edges.scala:517:17]
wire [63:0] atomics_a_8_data = pstore1_data; // @[Edges.scala:517:17]
reg [3:0] pstore1_way; // @[DCache.scala:495:30]
wire [3:0] _pstore2_way_T = pstore1_way; // @[DCache.scala:495:30, :525:34]
reg [7:0] pstore1_mask; // @[DCache.scala:496:31]
wire [7:0] pstore2_storegen_mask_mergedMask = pstore1_mask; // @[DCache.scala:496:31, :533:37]
wire _pstore1_rmw_T_4 = _pstore1_rmw_T | _pstore1_rmw_T_1; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_5 = _pstore1_rmw_T_4 | _pstore1_rmw_T_2; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_6 = _pstore1_rmw_T_5 | _pstore1_rmw_T_3; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_11 = _pstore1_rmw_T_7 | _pstore1_rmw_T_8; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_12 = _pstore1_rmw_T_11 | _pstore1_rmw_T_9; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_13 = _pstore1_rmw_T_12 | _pstore1_rmw_T_10; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_19 = _pstore1_rmw_T_14 | _pstore1_rmw_T_15; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_20 = _pstore1_rmw_T_19 | _pstore1_rmw_T_16; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_21 = _pstore1_rmw_T_20 | _pstore1_rmw_T_17; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_22 = _pstore1_rmw_T_21 | _pstore1_rmw_T_18; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_23 = _pstore1_rmw_T_13 | _pstore1_rmw_T_22; // @[package.scala:81:59]
wire _pstore1_rmw_T_24 = _pstore1_rmw_T_6 | _pstore1_rmw_T_23; // @[package.scala:81:59]
wire _pstore1_rmw_T_27 = _pstore1_rmw_T_25 | _pstore1_rmw_T_26; // @[Consts.scala:90:{32,42,49}]
wire _pstore1_rmw_T_29 = _pstore1_rmw_T_27 | _pstore1_rmw_T_28; // @[Consts.scala:90:{42,59,66}]
wire _pstore1_rmw_T_34 = _pstore1_rmw_T_30 | _pstore1_rmw_T_31; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_35 = _pstore1_rmw_T_34 | _pstore1_rmw_T_32; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_36 = _pstore1_rmw_T_35 | _pstore1_rmw_T_33; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_42 = _pstore1_rmw_T_37 | _pstore1_rmw_T_38; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_43 = _pstore1_rmw_T_42 | _pstore1_rmw_T_39; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_44 = _pstore1_rmw_T_43 | _pstore1_rmw_T_40; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_45 = _pstore1_rmw_T_44 | _pstore1_rmw_T_41; // @[package.scala:16:47, :81:59]
wire _pstore1_rmw_T_46 = _pstore1_rmw_T_36 | _pstore1_rmw_T_45; // @[package.scala:81:59]
wire _pstore1_rmw_T_47 = _pstore1_rmw_T_29 | _pstore1_rmw_T_46; // @[Consts.scala:87:44, :90:{59,76}]
wire _pstore1_rmw_T_50 = _pstore1_rmw_T_48; // @[DCache.scala:1191:{35,45}]
wire _pstore1_rmw_T_51 = _pstore1_rmw_T_47 & _pstore1_rmw_T_50; // @[DCache.scala:1191:{23,45}]
wire _pstore1_rmw_T_52 = _pstore1_rmw_T_24 | _pstore1_rmw_T_51; // @[DCache.scala:1190:21, :1191:23]
reg pstore1_rmw_r; // @[DCache.scala:498:44]
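  // Store-buffer control: pstore1 becomes valid on a write hit (or while held),
  // pstore2 holds a store that is draining, and pstore_drain decides when the
  // buffered store is written into the data array, either opportunistically or
  // forced on a miss.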
wire _pstore1_merge_likely_T = s2_valid_not_nacked_in_s1 & s2_write; // @[DCache.scala:336:44, :499:56]
wire _GEN_77 = s2_valid_hit & s2_write; // @[DCache.scala:422:48, :490:46]
wire _pstore1_merge_T; // @[DCache.scala:490:46]
assign _pstore1_merge_T = _GEN_77; // @[DCache.scala:490:46]
wire _pstore1_valid_T; // @[DCache.scala:490:46]
assign _pstore1_valid_T = _GEN_77; // @[DCache.scala:490:46]
wire _pstore1_held_T; // @[DCache.scala:490:46]
assign _pstore1_held_T = _GEN_77; // @[DCache.scala:490:46]
wire _pstore1_merge_T_2 = _pstore1_merge_T; // @[DCache.scala:490:{46,58}]
wire _pstore1_merge_T_4 = _pstore1_merge_T_2; // @[DCache.scala:490:58, :491:48]
reg pstore2_valid; // @[DCache.scala:501:30]
wire _pstore_drain_opportunistic_T_56 = ~_pstore_drain_opportunistic_T_55; // @[DCache.scala:1186:11]
wire _pstore_drain_opportunistic_T_59 = ~_pstore_drain_opportunistic_T_58; // @[DCache.scala:502:{36,55}]
wire pstore_drain_opportunistic = _pstore_drain_opportunistic_T_59; // @[DCache.scala:502:{36,92}]
reg pstore_drain_on_miss_REG; // @[DCache.scala:503:56]
wire pstore_drain_on_miss = releaseInFlight | pstore_drain_on_miss_REG; // @[DCache.scala:334:46, :503:{46,56}]
reg pstore1_held; // @[DCache.scala:504:29]
wire _GEN_78 = s2_valid & s2_write; // @[DCache.scala:331:25, :505:39]
wire _pstore1_valid_likely_T; // @[DCache.scala:505:39]
assign _pstore1_valid_likely_T = _GEN_78; // @[DCache.scala:505:39]
wire _io_cpu_perf_storeBufferEmptyAfterLoad_T_1; // @[DCache.scala:1082:16]
assign _io_cpu_perf_storeBufferEmptyAfterLoad_T_1 = _GEN_78; // @[DCache.scala:505:39, :1082:16]
wire _io_cpu_perf_storeBufferEmptyAfterStore_T_1; // @[DCache.scala:1086:15]
assign _io_cpu_perf_storeBufferEmptyAfterStore_T_1 = _GEN_78; // @[DCache.scala:505:39, :1086:15]
wire _io_cpu_perf_storeBufferEmptyAfterStore_T_4; // @[DCache.scala:1087:16]
assign _io_cpu_perf_storeBufferEmptyAfterStore_T_4 = _GEN_78; // @[DCache.scala:505:39, :1087:16]
wire _io_cpu_perf_canAcceptStoreThenLoad_T; // @[DCache.scala:1089:16]
assign _io_cpu_perf_canAcceptStoreThenLoad_T = _GEN_78; // @[DCache.scala:505:39, :1089:16]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_55; // @[DCache.scala:1092:100]
assign _io_cpu_perf_canAcceptLoadThenLoad_T_55 = _GEN_78; // @[DCache.scala:505:39, :1092:100]
wire pstore1_valid_likely = _pstore1_valid_likely_T | pstore1_held; // @[DCache.scala:504:29, :505:{39,51}]
wire _pstore1_valid_T_2 = _pstore1_valid_T; // @[DCache.scala:490:{46,58}]
wire _pstore1_valid_T_4 = _pstore1_valid_T_2; // @[DCache.scala:490:58, :491:48]
wire pstore1_valid = _pstore1_valid_T_4 | pstore1_held; // @[DCache.scala:491:48, :504:29, :507:38]
wire _advance_pstore1_T = pstore1_valid; // @[DCache.scala:507:38, :522:40]
assign _any_pstore_valid_T = pstore1_held | pstore2_valid; // @[DCache.scala:501:30, :504:29, :508:36]
assign any_pstore_valid = _any_pstore_valid_T; // @[DCache.scala:230:30, :508:36]
wire _GEN_79 = pstore1_valid_likely & pstore2_valid; // @[DCache.scala:501:30, :505:51, :509:54]
wire _pstore_drain_structural_T; // @[DCache.scala:509:54]
assign _pstore_drain_structural_T = _GEN_79; // @[DCache.scala:509:54]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_6; // @[DCache.scala:1090:20]
assign _io_cpu_perf_canAcceptStoreThenLoad_T_6 = _GEN_79; // @[DCache.scala:509:54, :1090:20]
wire _GEN_80 = s1_valid & s1_write; // @[DCache.scala:182:25, :509:85]
wire _pstore_drain_structural_T_1; // @[DCache.scala:509:85]
assign _pstore_drain_structural_T_1 = _GEN_80; // @[DCache.scala:509:85]
wire _io_cpu_perf_storeBufferEmptyAfterLoad_T; // @[DCache.scala:1081:15]
assign _io_cpu_perf_storeBufferEmptyAfterLoad_T = _GEN_80; // @[DCache.scala:509:85, :1081:15]
wire _io_cpu_perf_storeBufferEmptyAfterStore_T; // @[DCache.scala:1085:15]
assign _io_cpu_perf_storeBufferEmptyAfterStore_T = _GEN_80; // @[DCache.scala:509:85, :1085:15]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_2; // @[DCache.scala:1089:57]
assign _io_cpu_perf_canAcceptStoreThenLoad_T_2 = _GEN_80; // @[DCache.scala:509:85, :1089:57]
wire _io_cpu_perf_canAcceptStoreThenLoad_T_7; // @[DCache.scala:1090:57]
assign _io_cpu_perf_canAcceptStoreThenLoad_T_7 = _GEN_80; // @[DCache.scala:509:85, :1090:57]
wire _io_cpu_perf_canAcceptLoadThenLoad_T; // @[DCache.scala:1092:52]
assign _io_cpu_perf_canAcceptLoadThenLoad_T = _GEN_80; // @[DCache.scala:509:85, :1092:52]
wire _pstore_drain_structural_T_2 = _pstore_drain_structural_T_1; // @[DCache.scala:509:{85,98}]
wire pstore_drain_structural = _pstore_drain_structural_T & _pstore_drain_structural_T_2; // @[DCache.scala:509:{54,71,98}]
wire _T_49 = s2_valid_hit_pre_data_ecc & s2_write; // @[DCache.scala:420:69, :506:72]
wire _pstore_drain_T_2; // @[DCache.scala:506:72]
assign _pstore_drain_T_2 = _T_49; // @[DCache.scala:506:72]
wire _dataArb_io_in_0_valid_T_2; // @[DCache.scala:506:72]
assign _dataArb_io_in_0_valid_T_2 = _T_49; // @[DCache.scala:506:72]
wire _pstore_drain_T_4 = _pstore_drain_T_2; // @[DCache.scala:506:{72,84}]
wire _pstore_drain_T_5 = _pstore_drain_T_4 | pstore1_held; // @[DCache.scala:504:29, :506:{84,96}]
wire _pstore_drain_T_7 = _pstore_drain_T_5; // @[DCache.scala:506:96, :518:41]
wire _pstore_drain_T_8 = _pstore_drain_T_7 | pstore2_valid; // @[DCache.scala:501:30, :518:{41,58}]
wire _GEN_81 = pstore_drain_opportunistic | pstore_drain_on_miss; // @[DCache.scala:502:92, :503:46, :518:107]
wire _pstore_drain_T_9; // @[DCache.scala:518:107]
assign _pstore_drain_T_9 = _GEN_81; // @[DCache.scala:518:107]
wire _dataArb_io_in_0_valid_T_9; // @[DCache.scala:518:107]
assign _dataArb_io_in_0_valid_T_9 = _GEN_81; // @[DCache.scala:518:107]
wire _pstore_drain_T_10 = _pstore_drain_T_8 & _pstore_drain_T_9; // @[DCache.scala:518:{58,76,107}]
wire _pstore_drain_T_11 = _pstore_drain_T_10; // @[DCache.scala:517:44, :518:76]
assign pstore_drain = _pstore_drain_T_11; // @[DCache.scala:516:27, :517:44]
assign dataArb_io_in_0_bits_write = pstore_drain; // @[DCache.scala:152:28, :516:27]
wire _pstore1_held_T_2 = _pstore1_held_T; // @[DCache.scala:490:{46,58}]
wire _pstore1_held_T_4 = _pstore1_held_T_2; // @[DCache.scala:490:58, :491:48]
wire _pstore1_held_T_6 = _pstore1_held_T_4; // @[DCache.scala:491:48, :521:35]
wire _pstore1_held_T_7 = _pstore1_held_T_6 | pstore1_held; // @[DCache.scala:504:29, :521:{35,54}]
wire _pstore1_held_T_8 = _pstore1_held_T_7 & pstore2_valid; // @[DCache.scala:501:30, :521:{54,71}]
wire _pstore1_held_T_9 = ~pstore_drain; // @[DCache.scala:516:27, :521:91]
wire _pstore1_held_T_10 = _pstore1_held_T_8 & _pstore1_held_T_9; // @[DCache.scala:521:{71,88,91}]
wire _advance_pstore1_T_1 = pstore2_valid == pstore_drain; // @[DCache.scala:501:30, :516:27, :522:79]
wire advance_pstore1 = _advance_pstore1_T & _advance_pstore1_T_1; // @[DCache.scala:522:{40,61,79}]
wire _pstore2_storegen_data_T_3 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_7 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_11 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_15 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_19 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_23 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_27 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_data_T_31 = advance_pstore1; // @[DCache.scala:522:61, :528:78]
wire _pstore2_storegen_mask_T = advance_pstore1; // @[DCache.scala:522:61, :532:27]
wire _pstore2_valid_T = ~pstore_drain; // @[DCache.scala:516:27, :521:91, :523:37]
wire _pstore2_valid_T_1 = pstore2_valid & _pstore2_valid_T; // @[DCache.scala:501:30, :523:{34,37}]
wire _pstore2_valid_T_2 = _pstore2_valid_T_1 | advance_pstore1; // @[DCache.scala:522:61, :523:{34,51}]
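  // pstore2 capture: address and way registers plus one 8-bit data register per byte lane.
  // The per-byte enable terms below (advance_pstore1 and the pstore1_mask bits) update each
  // lane from pstore1_storegen_data when pstore1 advances, supporting byte-wise merging.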
reg [39:0] pstore2_addr; // @[DCache.scala:524:31]
reg [3:0] pstore2_way; // @[DCache.scala:525:30]
wire [7:0] _pstore2_storegen_data_T = pstore1_storegen_data[7:0]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_1 = pstore1_mask[0]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_3 = pstore1_mask[0]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_4 = pstore1_storegen_data[15:8]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_5 = pstore1_mask[1]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_4 = pstore1_mask[1]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_1; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_8 = pstore1_storegen_data[23:16]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_9 = pstore1_mask[2]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_5 = pstore1_mask[2]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_2; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_12 = pstore1_storegen_data[31:24]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_13 = pstore1_mask[3]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_6 = pstore1_mask[3]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_3; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_16 = pstore1_storegen_data[39:32]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_17 = pstore1_mask[4]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_7 = pstore1_mask[4]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_4; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_20 = pstore1_storegen_data[47:40]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_21 = pstore1_mask[5]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_8 = pstore1_mask[5]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_5; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_24 = pstore1_storegen_data[55:48]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_25 = pstore1_mask[6]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_9 = pstore1_mask[6]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_6; // @[DCache.scala:528:22]
wire [7:0] _pstore2_storegen_data_T_28 = pstore1_storegen_data[63:56]; // @[DCache.scala:497:42, :528:44]
wire _pstore2_storegen_data_T_29 = pstore1_mask[7]; // @[DCache.scala:496:31, :528:110]
wire _s1_hazard_T_10 = pstore1_mask[7]; // @[package.scala:211:50]
reg [7:0] pstore2_storegen_data_r_7; // @[DCache.scala:528:22]
wire [15:0] pstore2_storegen_data_lo_lo = {pstore2_storegen_data_r_1, pstore2_storegen_data_r}; // @[package.scala:45:27]
wire [15:0] pstore2_storegen_data_lo_hi = {pstore2_storegen_data_r_3, pstore2_storegen_data_r_2}; // @[package.scala:45:27]
wire [31:0] pstore2_storegen_data_lo = {pstore2_storegen_data_lo_hi, pstore2_storegen_data_lo_lo}; // @[package.scala:45:27]
wire [15:0] pstore2_storegen_data_hi_lo = {pstore2_storegen_data_r_5, pstore2_storegen_data_r_4}; // @[package.scala:45:27]
wire [15:0] pstore2_storegen_data_hi_hi = {pstore2_storegen_data_r_7, pstore2_storegen_data_r_6}; // @[package.scala:45:27]
wire [31:0] pstore2_storegen_data_hi = {pstore2_storegen_data_hi_hi, pstore2_storegen_data_hi_lo}; // @[package.scala:45:27]
wire [63:0] pstore2_storegen_data = {pstore2_storegen_data_hi, pstore2_storegen_data_lo}; // @[package.scala:45:27]
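  // pstore2 byte-mask register; its next value is derived from pstore2_storegen_mask_mergedMask
  // (the double negation below collapses to the merged mask).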
reg [7:0] pstore2_storegen_mask; // @[DCache.scala:531:19]
wire [7:0] _pstore2_storegen_mask_mask_T = ~pstore2_storegen_mask_mergedMask; // @[DCache.scala:533:37, :534:37]
wire [7:0] _pstore2_storegen_mask_mask_T_1 = _pstore2_storegen_mask_mask_T; // @[DCache.scala:534:{19,37}]
wire [7:0] _pstore2_storegen_mask_mask_T_2 = ~_pstore2_storegen_mask_mask_T_1; // @[DCache.scala:534:{15,19}]
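  // dataArb port 0 valid: mirrors the pstore_drain condition above to request the data-array
  // write port whenever a buffered store is ready to drain.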
wire _dataArb_io_in_0_valid_T_4 = _dataArb_io_in_0_valid_T_2; // @[DCache.scala:506:{72,84}]
wire _dataArb_io_in_0_valid_T_5 = _dataArb_io_in_0_valid_T_4 | pstore1_held; // @[DCache.scala:504:29, :506:{84,96}]
wire _dataArb_io_in_0_valid_T_7 = _dataArb_io_in_0_valid_T_5; // @[DCache.scala:506:96, :518:41]
wire _dataArb_io_in_0_valid_T_8 = _dataArb_io_in_0_valid_T_7 | pstore2_valid; // @[DCache.scala:501:30, :518:{41,58}]
wire _dataArb_io_in_0_valid_T_10 = _dataArb_io_in_0_valid_T_8 & _dataArb_io_in_0_valid_T_9; // @[DCache.scala:518:{58,76,107}]
wire _dataArb_io_in_0_valid_T_11 = _dataArb_io_in_0_valid_T_10; // @[DCache.scala:517:44, :518:76]
assign _dataArb_io_in_0_valid_T_12 = _dataArb_io_in_0_valid_T_11; // @[DCache.scala:516:27, :517:44]
assign dataArb_io_in_0_valid = _dataArb_io_in_0_valid_T_12; // @[DCache.scala:152:28, :516:27]
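  // dataArb port 0 request fields: when pstore2 is valid it takes priority over pstore1 for the
  // write address, way enable, and write data; the selected data is repacked byte by byte.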
wire [39:0] _GEN_82 = pstore2_valid ? pstore2_addr : pstore1_addr; // @[DCache.scala:493:31, :501:30, :524:31, :549:36]
wire [39:0] _dataArb_io_in_0_bits_addr_T; // @[DCache.scala:549:36]
assign _dataArb_io_in_0_bits_addr_T = _GEN_82; // @[DCache.scala:549:36]
wire [39:0] _dataArb_io_in_0_bits_wordMask_wordMask_T; // @[DCache.scala:554:32]
assign _dataArb_io_in_0_bits_wordMask_wordMask_T = _GEN_82; // @[DCache.scala:549:36, :554:32]
assign dataArb_io_in_0_bits_addr = _dataArb_io_in_0_bits_addr_T[7:0]; // @[DCache.scala:152:28, :549:{30,36}]
assign _dataArb_io_in_0_bits_way_en_T = pstore2_valid ? pstore2_way : pstore1_way; // @[DCache.scala:495:30, :501:30, :525:30, :550:38]
assign dataArb_io_in_0_bits_way_en = _dataArb_io_in_0_bits_way_en_T; // @[DCache.scala:152:28, :550:38]
wire [63:0] _dataArb_io_in_0_bits_wdata_T = pstore2_valid ? pstore2_storegen_data : pstore1_data; // @[package.scala:45:27]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_1 = _dataArb_io_in_0_bits_wdata_T[7:0]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_2 = _dataArb_io_in_0_bits_wdata_T[15:8]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_3 = _dataArb_io_in_0_bits_wdata_T[23:16]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_4 = _dataArb_io_in_0_bits_wdata_T[31:24]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_5 = _dataArb_io_in_0_bits_wdata_T[39:32]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_6 = _dataArb_io_in_0_bits_wdata_T[47:40]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_7 = _dataArb_io_in_0_bits_wdata_T[55:48]; // @[package.scala:211:50]
wire [7:0] _dataArb_io_in_0_bits_wdata_T_8 = _dataArb_io_in_0_bits_wdata_T[63:56]; // @[package.scala:211:50]
wire [15:0] dataArb_io_in_0_bits_wdata_lo_lo = {_dataArb_io_in_0_bits_wdata_T_2, _dataArb_io_in_0_bits_wdata_T_1}; // @[package.scala:45:27, :211:50]
wire [15:0] dataArb_io_in_0_bits_wdata_lo_hi = {_dataArb_io_in_0_bits_wdata_T_4, _dataArb_io_in_0_bits_wdata_T_3}; // @[package.scala:45:27, :211:50]
wire [31:0] dataArb_io_in_0_bits_wdata_lo = {dataArb_io_in_0_bits_wdata_lo_hi, dataArb_io_in_0_bits_wdata_lo_lo}; // @[package.scala:45:27]
wire [15:0] dataArb_io_in_0_bits_wdata_hi_lo = {_dataArb_io_in_0_bits_wdata_T_6, _dataArb_io_in_0_bits_wdata_T_5}; // @[package.scala:45:27, :211:50]
wire [15:0] dataArb_io_in_0_bits_wdata_hi_hi = {_dataArb_io_in_0_bits_wdata_T_8, _dataArb_io_in_0_bits_wdata_T_7}; // @[package.scala:45:27, :211:50]
wire [31:0] dataArb_io_in_0_bits_wdata_hi = {dataArb_io_in_0_bits_wdata_hi_hi, dataArb_io_in_0_bits_wdata_hi_lo}; // @[package.scala:45:27]
assign _dataArb_io_in_0_bits_wdata_T_9 = {dataArb_io_in_0_bits_wdata_hi, dataArb_io_in_0_bits_wdata_lo}; // @[package.scala:45:27]
assign dataArb_io_in_0_bits_wdata = _dataArb_io_in_0_bits_wdata_T_9; // @[package.scala:45:27]
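  // wordMask: OR-reduce the per-byte ECC mask of the selected store down to the word-level
  // write enable presented to the data array.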
wire _dataArb_io_in_0_bits_wordMask_eccMask_T = _dataArb_io_in_0_bits_eccMask_T_17[0]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_1 = _dataArb_io_in_0_bits_eccMask_T_17[1]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_2 = _dataArb_io_in_0_bits_eccMask_T_17[2]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_3 = _dataArb_io_in_0_bits_eccMask_T_17[3]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_4 = _dataArb_io_in_0_bits_eccMask_T_17[4]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_5 = _dataArb_io_in_0_bits_eccMask_T_17[5]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_6 = _dataArb_io_in_0_bits_eccMask_T_17[6]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_7 = _dataArb_io_in_0_bits_eccMask_T_17[7]; // @[package.scala:45:27]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_8 = _dataArb_io_in_0_bits_wordMask_eccMask_T | _dataArb_io_in_0_bits_wordMask_eccMask_T_1; // @[package.scala:81:59]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_9 = _dataArb_io_in_0_bits_wordMask_eccMask_T_8 | _dataArb_io_in_0_bits_wordMask_eccMask_T_2; // @[package.scala:81:59]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_10 = _dataArb_io_in_0_bits_wordMask_eccMask_T_9 | _dataArb_io_in_0_bits_wordMask_eccMask_T_3; // @[package.scala:81:59]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_11 = _dataArb_io_in_0_bits_wordMask_eccMask_T_10 | _dataArb_io_in_0_bits_wordMask_eccMask_T_4; // @[package.scala:81:59]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_12 = _dataArb_io_in_0_bits_wordMask_eccMask_T_11 | _dataArb_io_in_0_bits_wordMask_eccMask_T_5; // @[package.scala:81:59]
wire _dataArb_io_in_0_bits_wordMask_eccMask_T_13 = _dataArb_io_in_0_bits_wordMask_eccMask_T_12 | _dataArb_io_in_0_bits_wordMask_eccMask_T_6; // @[package.scala:81:59]
wire dataArb_io_in_0_bits_wordMask_eccMask = _dataArb_io_in_0_bits_wordMask_eccMask_T_13 | _dataArb_io_in_0_bits_wordMask_eccMask_T_7; // @[package.scala:81:59]
wire [1:0] _dataArb_io_in_0_bits_wordMask_T_3 = {1'h0, dataArb_io_in_0_bits_wordMask_eccMask}; // @[package.scala:81:59]
assign dataArb_io_in_0_bits_wordMask = _dataArb_io_in_0_bits_wordMask_T_3[0]; // @[DCache.scala:152:28, :552:34, :555:55]
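  // eccMask: the byte mask of the store being drained (pstore2's merged mask when it is valid,
  // otherwise pstore1's), repacked per ECC granule (one byte per granule here).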
wire [7:0] _dataArb_io_in_0_bits_eccMask_T = pstore2_valid ? pstore2_storegen_mask : pstore1_mask; // @[DCache.scala:496:31, :501:30, :531:19, :557:47]
wire _dataArb_io_in_0_bits_eccMask_T_1 = _dataArb_io_in_0_bits_eccMask_T[0]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_9 = _dataArb_io_in_0_bits_eccMask_T_1; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_2 = _dataArb_io_in_0_bits_eccMask_T[1]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_10 = _dataArb_io_in_0_bits_eccMask_T_2; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_3 = _dataArb_io_in_0_bits_eccMask_T[2]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_11 = _dataArb_io_in_0_bits_eccMask_T_3; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_4 = _dataArb_io_in_0_bits_eccMask_T[3]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_12 = _dataArb_io_in_0_bits_eccMask_T_4; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_5 = _dataArb_io_in_0_bits_eccMask_T[4]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_13 = _dataArb_io_in_0_bits_eccMask_T_5; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_6 = _dataArb_io_in_0_bits_eccMask_T[5]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_14 = _dataArb_io_in_0_bits_eccMask_T_6; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_7 = _dataArb_io_in_0_bits_eccMask_T[6]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_15 = _dataArb_io_in_0_bits_eccMask_T_7; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_8 = _dataArb_io_in_0_bits_eccMask_T[7]; // @[package.scala:211:50]
wire _dataArb_io_in_0_bits_eccMask_T_16 = _dataArb_io_in_0_bits_eccMask_T_8; // @[package.scala:211:50]
wire [1:0] dataArb_io_in_0_bits_eccMask_lo_lo = {_dataArb_io_in_0_bits_eccMask_T_10, _dataArb_io_in_0_bits_eccMask_T_9}; // @[package.scala:45:27]
wire [1:0] dataArb_io_in_0_bits_eccMask_lo_hi = {_dataArb_io_in_0_bits_eccMask_T_12, _dataArb_io_in_0_bits_eccMask_T_11}; // @[package.scala:45:27]
wire [3:0] dataArb_io_in_0_bits_eccMask_lo = {dataArb_io_in_0_bits_eccMask_lo_hi, dataArb_io_in_0_bits_eccMask_lo_lo}; // @[package.scala:45:27]
wire [1:0] dataArb_io_in_0_bits_eccMask_hi_lo = {_dataArb_io_in_0_bits_eccMask_T_14, _dataArb_io_in_0_bits_eccMask_T_13}; // @[package.scala:45:27]
wire [1:0] dataArb_io_in_0_bits_eccMask_hi_hi = {_dataArb_io_in_0_bits_eccMask_T_16, _dataArb_io_in_0_bits_eccMask_T_15}; // @[package.scala:45:27]
wire [3:0] dataArb_io_in_0_bits_eccMask_hi = {dataArb_io_in_0_bits_eccMask_hi_hi, dataArb_io_in_0_bits_eccMask_hi_lo}; // @[package.scala:45:27]
assign _dataArb_io_in_0_bits_eccMask_T_17 = {dataArb_io_in_0_bits_eccMask_hi, dataArb_io_in_0_bits_eccMask_lo}; // @[package.scala:45:27]
assign dataArb_io_in_0_bits_eccMask = _dataArb_io_in_0_bits_eccMask_T_17; // @[package.scala:45:27]
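  // s1 hazard detection: the s1 access conflicts with pstore1 or pstore2 when it targets the
  // same doubleword (addr[7:3]) and the byte masks overlap (for an s1 store the masks are first
  // expanded to ECC-granule width). A conflicting s1 load raises s1_raw_hazard.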
wire [4:0] _s1_hazard_T = pstore1_addr[7:3]; // @[DCache.scala:493:31, :561:9]
wire [4:0] _s1_hazard_T_1 = s1_vaddr[7:3]; // @[DCache.scala:197:21, :561:43]
wire [4:0] _s1_hazard_T_63 = s1_vaddr[7:3]; // @[DCache.scala:197:21, :561:43]
wire _s1_hazard_T_2 = _s1_hazard_T == _s1_hazard_T_1; // @[DCache.scala:561:{9,31,43}]
wire _s1_hazard_T_11 = _s1_hazard_T_3; // @[package.scala:211:50]
wire _s1_hazard_T_12 = _s1_hazard_T_4; // @[package.scala:211:50]
wire _s1_hazard_T_13 = _s1_hazard_T_5; // @[package.scala:211:50]
wire _s1_hazard_T_14 = _s1_hazard_T_6; // @[package.scala:211:50]
wire _s1_hazard_T_15 = _s1_hazard_T_7; // @[package.scala:211:50]
wire _s1_hazard_T_16 = _s1_hazard_T_8; // @[package.scala:211:50]
wire _s1_hazard_T_17 = _s1_hazard_T_9; // @[package.scala:211:50]
wire _s1_hazard_T_18 = _s1_hazard_T_10; // @[package.scala:211:50]
wire [1:0] s1_hazard_lo_lo = {_s1_hazard_T_12, _s1_hazard_T_11}; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_hi = {_s1_hazard_T_14, _s1_hazard_T_13}; // @[package.scala:45:27]
wire [3:0] s1_hazard_lo = {s1_hazard_lo_hi, s1_hazard_lo_lo}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_lo = {_s1_hazard_T_16, _s1_hazard_T_15}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_hi = {_s1_hazard_T_18, _s1_hazard_T_17}; // @[package.scala:45:27]
wire [3:0] s1_hazard_hi = {s1_hazard_hi_hi, s1_hazard_hi_lo}; // @[package.scala:45:27]
wire [7:0] _s1_hazard_T_19 = {s1_hazard_hi, s1_hazard_lo}; // @[package.scala:45:27]
wire _s1_hazard_T_20 = _s1_hazard_T_19[0]; // @[package.scala:45:27]
wire _s1_hazard_T_21 = _s1_hazard_T_19[1]; // @[package.scala:45:27]
wire _s1_hazard_T_22 = _s1_hazard_T_19[2]; // @[package.scala:45:27]
wire _s1_hazard_T_23 = _s1_hazard_T_19[3]; // @[package.scala:45:27]
wire _s1_hazard_T_24 = _s1_hazard_T_19[4]; // @[package.scala:45:27]
wire _s1_hazard_T_25 = _s1_hazard_T_19[5]; // @[package.scala:45:27]
wire _s1_hazard_T_26 = _s1_hazard_T_19[6]; // @[package.scala:45:27]
wire _s1_hazard_T_27 = _s1_hazard_T_19[7]; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_lo_1 = {_s1_hazard_T_21, _s1_hazard_T_20}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_lo_hi_1 = {_s1_hazard_T_23, _s1_hazard_T_22}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_lo_1 = {s1_hazard_lo_hi_1, s1_hazard_lo_lo_1}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_lo_1 = {_s1_hazard_T_25, _s1_hazard_T_24}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_hi_1 = {_s1_hazard_T_27, _s1_hazard_T_26}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_hi_1 = {s1_hazard_hi_hi_1, s1_hazard_hi_lo_1}; // @[DCache.scala:1182:52]
wire [7:0] _s1_hazard_T_28 = {s1_hazard_hi_1, s1_hazard_lo_1}; // @[DCache.scala:1182:52]
wire _s1_hazard_T_29 = s1_mask_xwr[0]; // @[package.scala:211:50]
wire _s1_hazard_T_91 = s1_mask_xwr[0]; // @[package.scala:211:50]
wire _s1_hazard_T_37 = _s1_hazard_T_29; // @[package.scala:211:50]
wire _s1_hazard_T_30 = s1_mask_xwr[1]; // @[package.scala:211:50]
wire _s1_hazard_T_92 = s1_mask_xwr[1]; // @[package.scala:211:50]
wire _s1_hazard_T_38 = _s1_hazard_T_30; // @[package.scala:211:50]
wire _s1_hazard_T_31 = s1_mask_xwr[2]; // @[package.scala:211:50]
wire _s1_hazard_T_93 = s1_mask_xwr[2]; // @[package.scala:211:50]
wire _s1_hazard_T_39 = _s1_hazard_T_31; // @[package.scala:211:50]
wire _s1_hazard_T_32 = s1_mask_xwr[3]; // @[package.scala:211:50]
wire _s1_hazard_T_94 = s1_mask_xwr[3]; // @[package.scala:211:50]
wire _s1_hazard_T_40 = _s1_hazard_T_32; // @[package.scala:211:50]
wire _s1_hazard_T_33 = s1_mask_xwr[4]; // @[package.scala:211:50]
wire _s1_hazard_T_95 = s1_mask_xwr[4]; // @[package.scala:211:50]
wire _s1_hazard_T_41 = _s1_hazard_T_33; // @[package.scala:211:50]
wire _s1_hazard_T_34 = s1_mask_xwr[5]; // @[package.scala:211:50]
wire _s1_hazard_T_96 = s1_mask_xwr[5]; // @[package.scala:211:50]
wire _s1_hazard_T_42 = _s1_hazard_T_34; // @[package.scala:211:50]
wire _s1_hazard_T_35 = s1_mask_xwr[6]; // @[package.scala:211:50]
wire _s1_hazard_T_97 = s1_mask_xwr[6]; // @[package.scala:211:50]
wire _s1_hazard_T_43 = _s1_hazard_T_35; // @[package.scala:211:50]
wire _s1_hazard_T_36 = s1_mask_xwr[7]; // @[package.scala:211:50]
wire _s1_hazard_T_98 = s1_mask_xwr[7]; // @[package.scala:211:50]
wire _s1_hazard_T_44 = _s1_hazard_T_36; // @[package.scala:211:50]
wire [1:0] s1_hazard_lo_lo_2 = {_s1_hazard_T_38, _s1_hazard_T_37}; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_hi_2 = {_s1_hazard_T_40, _s1_hazard_T_39}; // @[package.scala:45:27]
wire [3:0] s1_hazard_lo_2 = {s1_hazard_lo_hi_2, s1_hazard_lo_lo_2}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_lo_2 = {_s1_hazard_T_42, _s1_hazard_T_41}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_hi_2 = {_s1_hazard_T_44, _s1_hazard_T_43}; // @[package.scala:45:27]
wire [3:0] s1_hazard_hi_2 = {s1_hazard_hi_hi_2, s1_hazard_hi_lo_2}; // @[package.scala:45:27]
wire [7:0] _s1_hazard_T_45 = {s1_hazard_hi_2, s1_hazard_lo_2}; // @[package.scala:45:27]
wire _s1_hazard_T_46 = _s1_hazard_T_45[0]; // @[package.scala:45:27]
wire _s1_hazard_T_47 = _s1_hazard_T_45[1]; // @[package.scala:45:27]
wire _s1_hazard_T_48 = _s1_hazard_T_45[2]; // @[package.scala:45:27]
wire _s1_hazard_T_49 = _s1_hazard_T_45[3]; // @[package.scala:45:27]
wire _s1_hazard_T_50 = _s1_hazard_T_45[4]; // @[package.scala:45:27]
wire _s1_hazard_T_51 = _s1_hazard_T_45[5]; // @[package.scala:45:27]
wire _s1_hazard_T_52 = _s1_hazard_T_45[6]; // @[package.scala:45:27]
wire _s1_hazard_T_53 = _s1_hazard_T_45[7]; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_lo_3 = {_s1_hazard_T_47, _s1_hazard_T_46}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_lo_hi_3 = {_s1_hazard_T_49, _s1_hazard_T_48}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_lo_3 = {s1_hazard_lo_hi_3, s1_hazard_lo_lo_3}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_lo_3 = {_s1_hazard_T_51, _s1_hazard_T_50}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_hi_3 = {_s1_hazard_T_53, _s1_hazard_T_52}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_hi_3 = {s1_hazard_hi_hi_3, s1_hazard_hi_lo_3}; // @[DCache.scala:1182:52]
wire [7:0] _s1_hazard_T_54 = {s1_hazard_hi_3, s1_hazard_lo_3}; // @[DCache.scala:1182:52]
wire [7:0] _s1_hazard_T_55 = _s1_hazard_T_28 & _s1_hazard_T_54; // @[DCache.scala:562:38, :1182:52]
wire _s1_hazard_T_56 = |_s1_hazard_T_55; // @[DCache.scala:562:{38,66}]
wire [7:0] _s1_hazard_T_57 = pstore1_mask & s1_mask_xwr; // @[DCache.scala:496:31, :562:77]
wire _s1_hazard_T_58 = |_s1_hazard_T_57; // @[DCache.scala:562:{77,92}]
wire _s1_hazard_T_59 = s1_write ? _s1_hazard_T_56 : _s1_hazard_T_58; // @[DCache.scala:562:{8,66,92}]
wire _s1_hazard_T_60 = _s1_hazard_T_2 & _s1_hazard_T_59; // @[DCache.scala:561:{31,65}, :562:8]
wire _s1_hazard_T_61 = pstore1_valid_likely & _s1_hazard_T_60; // @[DCache.scala:505:51, :561:65, :564:27]
wire [4:0] _s1_hazard_T_62 = pstore2_addr[7:3]; // @[DCache.scala:524:31, :561:9]
wire _s1_hazard_T_64 = _s1_hazard_T_62 == _s1_hazard_T_63; // @[DCache.scala:561:{9,31,43}]
wire _s1_hazard_T_65 = pstore2_storegen_mask[0]; // @[package.scala:211:50]
wire _s1_hazard_T_73 = _s1_hazard_T_65; // @[package.scala:211:50]
wire _s1_hazard_T_66 = pstore2_storegen_mask[1]; // @[package.scala:211:50]
wire _s1_hazard_T_74 = _s1_hazard_T_66; // @[package.scala:211:50]
wire _s1_hazard_T_67 = pstore2_storegen_mask[2]; // @[package.scala:211:50]
wire _s1_hazard_T_75 = _s1_hazard_T_67; // @[package.scala:211:50]
wire _s1_hazard_T_68 = pstore2_storegen_mask[3]; // @[package.scala:211:50]
wire _s1_hazard_T_76 = _s1_hazard_T_68; // @[package.scala:211:50]
wire _s1_hazard_T_69 = pstore2_storegen_mask[4]; // @[package.scala:211:50]
wire _s1_hazard_T_77 = _s1_hazard_T_69; // @[package.scala:211:50]
wire _s1_hazard_T_70 = pstore2_storegen_mask[5]; // @[package.scala:211:50]
wire _s1_hazard_T_78 = _s1_hazard_T_70; // @[package.scala:211:50]
wire _s1_hazard_T_71 = pstore2_storegen_mask[6]; // @[package.scala:211:50]
wire _s1_hazard_T_79 = _s1_hazard_T_71; // @[package.scala:211:50]
wire _s1_hazard_T_72 = pstore2_storegen_mask[7]; // @[package.scala:211:50]
wire _s1_hazard_T_80 = _s1_hazard_T_72; // @[package.scala:211:50]
wire [1:0] s1_hazard_lo_lo_4 = {_s1_hazard_T_74, _s1_hazard_T_73}; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_hi_4 = {_s1_hazard_T_76, _s1_hazard_T_75}; // @[package.scala:45:27]
wire [3:0] s1_hazard_lo_4 = {s1_hazard_lo_hi_4, s1_hazard_lo_lo_4}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_lo_4 = {_s1_hazard_T_78, _s1_hazard_T_77}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_hi_4 = {_s1_hazard_T_80, _s1_hazard_T_79}; // @[package.scala:45:27]
wire [3:0] s1_hazard_hi_4 = {s1_hazard_hi_hi_4, s1_hazard_hi_lo_4}; // @[package.scala:45:27]
wire [7:0] _s1_hazard_T_81 = {s1_hazard_hi_4, s1_hazard_lo_4}; // @[package.scala:45:27]
wire _s1_hazard_T_82 = _s1_hazard_T_81[0]; // @[package.scala:45:27]
wire _s1_hazard_T_83 = _s1_hazard_T_81[1]; // @[package.scala:45:27]
wire _s1_hazard_T_84 = _s1_hazard_T_81[2]; // @[package.scala:45:27]
wire _s1_hazard_T_85 = _s1_hazard_T_81[3]; // @[package.scala:45:27]
wire _s1_hazard_T_86 = _s1_hazard_T_81[4]; // @[package.scala:45:27]
wire _s1_hazard_T_87 = _s1_hazard_T_81[5]; // @[package.scala:45:27]
wire _s1_hazard_T_88 = _s1_hazard_T_81[6]; // @[package.scala:45:27]
wire _s1_hazard_T_89 = _s1_hazard_T_81[7]; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_lo_5 = {_s1_hazard_T_83, _s1_hazard_T_82}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_lo_hi_5 = {_s1_hazard_T_85, _s1_hazard_T_84}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_lo_5 = {s1_hazard_lo_hi_5, s1_hazard_lo_lo_5}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_lo_5 = {_s1_hazard_T_87, _s1_hazard_T_86}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_hi_5 = {_s1_hazard_T_89, _s1_hazard_T_88}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_hi_5 = {s1_hazard_hi_hi_5, s1_hazard_hi_lo_5}; // @[DCache.scala:1182:52]
wire [7:0] _s1_hazard_T_90 = {s1_hazard_hi_5, s1_hazard_lo_5}; // @[DCache.scala:1182:52]
wire _s1_hazard_T_99 = _s1_hazard_T_91; // @[package.scala:211:50]
wire _s1_hazard_T_100 = _s1_hazard_T_92; // @[package.scala:211:50]
wire _s1_hazard_T_101 = _s1_hazard_T_93; // @[package.scala:211:50]
wire _s1_hazard_T_102 = _s1_hazard_T_94; // @[package.scala:211:50]
wire _s1_hazard_T_103 = _s1_hazard_T_95; // @[package.scala:211:50]
wire _s1_hazard_T_104 = _s1_hazard_T_96; // @[package.scala:211:50]
wire _s1_hazard_T_105 = _s1_hazard_T_97; // @[package.scala:211:50]
wire _s1_hazard_T_106 = _s1_hazard_T_98; // @[package.scala:211:50]
wire [1:0] s1_hazard_lo_lo_6 = {_s1_hazard_T_100, _s1_hazard_T_99}; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_hi_6 = {_s1_hazard_T_102, _s1_hazard_T_101}; // @[package.scala:45:27]
wire [3:0] s1_hazard_lo_6 = {s1_hazard_lo_hi_6, s1_hazard_lo_lo_6}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_lo_6 = {_s1_hazard_T_104, _s1_hazard_T_103}; // @[package.scala:45:27]
wire [1:0] s1_hazard_hi_hi_6 = {_s1_hazard_T_106, _s1_hazard_T_105}; // @[package.scala:45:27]
wire [3:0] s1_hazard_hi_6 = {s1_hazard_hi_hi_6, s1_hazard_hi_lo_6}; // @[package.scala:45:27]
wire [7:0] _s1_hazard_T_107 = {s1_hazard_hi_6, s1_hazard_lo_6}; // @[package.scala:45:27]
wire _s1_hazard_T_108 = _s1_hazard_T_107[0]; // @[package.scala:45:27]
wire _s1_hazard_T_109 = _s1_hazard_T_107[1]; // @[package.scala:45:27]
wire _s1_hazard_T_110 = _s1_hazard_T_107[2]; // @[package.scala:45:27]
wire _s1_hazard_T_111 = _s1_hazard_T_107[3]; // @[package.scala:45:27]
wire _s1_hazard_T_112 = _s1_hazard_T_107[4]; // @[package.scala:45:27]
wire _s1_hazard_T_113 = _s1_hazard_T_107[5]; // @[package.scala:45:27]
wire _s1_hazard_T_114 = _s1_hazard_T_107[6]; // @[package.scala:45:27]
wire _s1_hazard_T_115 = _s1_hazard_T_107[7]; // @[package.scala:45:27]
wire [1:0] s1_hazard_lo_lo_7 = {_s1_hazard_T_109, _s1_hazard_T_108}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_lo_hi_7 = {_s1_hazard_T_111, _s1_hazard_T_110}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_lo_7 = {s1_hazard_lo_hi_7, s1_hazard_lo_lo_7}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_lo_7 = {_s1_hazard_T_113, _s1_hazard_T_112}; // @[DCache.scala:1182:52]
wire [1:0] s1_hazard_hi_hi_7 = {_s1_hazard_T_115, _s1_hazard_T_114}; // @[DCache.scala:1182:52]
wire [3:0] s1_hazard_hi_7 = {s1_hazard_hi_hi_7, s1_hazard_hi_lo_7}; // @[DCache.scala:1182:52]
wire [7:0] _s1_hazard_T_116 = {s1_hazard_hi_7, s1_hazard_lo_7}; // @[DCache.scala:1182:52]
wire [7:0] _s1_hazard_T_117 = _s1_hazard_T_90 & _s1_hazard_T_116; // @[DCache.scala:562:38, :1182:52]
wire _s1_hazard_T_118 = |_s1_hazard_T_117; // @[DCache.scala:562:{38,66}]
wire [7:0] _s1_hazard_T_119 = pstore2_storegen_mask & s1_mask_xwr; // @[DCache.scala:531:19, :562:77]
wire _s1_hazard_T_120 = |_s1_hazard_T_119; // @[DCache.scala:562:{77,92}]
wire _s1_hazard_T_121 = s1_write ? _s1_hazard_T_118 : _s1_hazard_T_120; // @[DCache.scala:562:{8,66,92}]
wire _s1_hazard_T_122 = _s1_hazard_T_64 & _s1_hazard_T_121; // @[DCache.scala:561:{31,65}, :562:8]
wire _s1_hazard_T_123 = pstore2_valid & _s1_hazard_T_122; // @[DCache.scala:501:30, :561:65, :565:21]
wire s1_hazard = _s1_hazard_T_61 | _s1_hazard_T_123; // @[DCache.scala:564:{27,69}, :565:21]
wire s1_raw_hazard = s1_read & s1_hazard; // @[DCache.scala:564:69, :566:31]
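  // A load that hit a read-after-write hazard in s1 is nacked in s2 with the 'raw' cause.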
wire _T_60 = s1_valid & s1_raw_hazard; // @[DCache.scala:182:25, :566:31, :571:18]
reg io_cpu_s2_nack_cause_raw_REG; // @[DCache.scala:574:38]
assign _io_cpu_s2_nack_cause_raw_T_3 = io_cpu_s2_nack_cause_raw_REG; // @[DCache.scala:574:{38,54}]
assign io_cpu_s2_nack_cause_raw_0 = _io_cpu_s2_nack_cause_raw_T_3; // @[DCache.scala:101:7, :574:54]
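  // Uncached (MMIO) A-channel fields: pick a free source ID by priority-encoding the complement
  // of uncachedInFlight (shifted left by one position), form the block-aligned acquire address
  // (low 6 bits zero), and zero-extend the pstore1 byte mask.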
wire _a_source_T = ~uncachedInFlight_0; // @[DCache.scala:236:33, :577:34]
wire [1:0] _a_source_T_1 = {_a_source_T, 1'h0}; // @[DCache.scala:577:{34,59}]
wire _a_source_T_2 = _a_source_T_1[0]; // @[OneHot.scala:48:45]
wire _a_source_T_3 = _a_source_T_1[1]; // @[OneHot.scala:48:45]
wire a_source = ~_a_source_T_2; // @[OneHot.scala:48:45]
wire get_source = a_source; // @[Mux.scala:50:70]
wire put_source = a_source; // @[Mux.scala:50:70]
wire putpartial_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_1_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_2_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_3_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_4_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_5_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_6_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_7_source = a_source; // @[Mux.scala:50:70]
wire atomics_a_8_source = a_source; // @[Mux.scala:50:70]
wire a_sel_shiftAmount = a_source; // @[OneHot.scala:64:49]
wire [39:0] acquire_address = {_acquire_address_T, 6'h0}; // @[DCache.scala:578:{38,49}]
wire [22:0] a_mask = {15'h0, pstore1_mask}; // @[DCache.scala:496:31, :582:29]
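  // get_legal: decode s2_req_addr against the slave address map (XOR with each base, mask the
  // result, compare to zero) to decide whether a Get to this address is legal.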
wire [39:0] _GEN_83 = {s2_req_addr[39:14], s2_req_addr[13:0] ^ 14'h3000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_4; // @[Parameters.scala:137:31]
assign _get_legal_T_4 = _GEN_83; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_4; // @[Parameters.scala:137:31]
assign _put_legal_T_4 = _GEN_83; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_4; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_4 = _GEN_83; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_5 = {1'h0, _get_legal_T_4}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_6 = _get_legal_T_5 & 41'hFFEFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_7 = _get_legal_T_6; // @[Parameters.scala:137:46]
wire _get_legal_T_8 = _get_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _get_legal_T_9 = _get_legal_T_8; // @[Parameters.scala:684:54]
wire _get_legal_T_72 = _get_legal_T_9; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _get_legal_T_15 = {1'h0, _get_legal_T_14}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_16 = _get_legal_T_15 & 41'hFFEFA000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_17 = _get_legal_T_16; // @[Parameters.scala:137:46]
wire _get_legal_T_18 = _get_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_84 = {s2_req_addr[39:17], s2_req_addr[16:0] ^ 17'h10000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_19; // @[Parameters.scala:137:31]
assign _get_legal_T_19 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _get_legal_T_24; // @[Parameters.scala:137:31]
assign _get_legal_T_24 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_63; // @[Parameters.scala:137:31]
assign _put_legal_T_63 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_63; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_63 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_53; // @[Parameters.scala:137:31]
assign _atomics_legal_T_53 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_113; // @[Parameters.scala:137:31]
assign _atomics_legal_T_113 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_173; // @[Parameters.scala:137:31]
assign _atomics_legal_T_173 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_233; // @[Parameters.scala:137:31]
assign _atomics_legal_T_233 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_293; // @[Parameters.scala:137:31]
assign _atomics_legal_T_293 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_353; // @[Parameters.scala:137:31]
assign _atomics_legal_T_353 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_413; // @[Parameters.scala:137:31]
assign _atomics_legal_T_413 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_473; // @[Parameters.scala:137:31]
assign _atomics_legal_T_473 = _GEN_84; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_533; // @[Parameters.scala:137:31]
assign _atomics_legal_T_533 = _GEN_84; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_20 = {1'h0, _get_legal_T_19}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_21 = _get_legal_T_20 & 41'hFDEFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_22 = _get_legal_T_21; // @[Parameters.scala:137:46]
wire _get_legal_T_23 = _get_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _get_legal_T_25 = {1'h0, _get_legal_T_24}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_26 = _get_legal_T_25 & 41'hFFEF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_27 = _get_legal_T_26; // @[Parameters.scala:137:46]
wire _get_legal_T_28 = _get_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_85 = {s2_req_addr[39:26], s2_req_addr[25:0] ^ 26'h2000000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_29; // @[Parameters.scala:137:31]
assign _get_legal_T_29 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_24; // @[Parameters.scala:137:31]
assign _put_legal_T_24 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_24; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_24 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_14; // @[Parameters.scala:137:31]
assign _atomics_legal_T_14 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_74; // @[Parameters.scala:137:31]
assign _atomics_legal_T_74 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_134; // @[Parameters.scala:137:31]
assign _atomics_legal_T_134 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_194; // @[Parameters.scala:137:31]
assign _atomics_legal_T_194 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_254; // @[Parameters.scala:137:31]
assign _atomics_legal_T_254 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_314; // @[Parameters.scala:137:31]
assign _atomics_legal_T_314 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_374; // @[Parameters.scala:137:31]
assign _atomics_legal_T_374 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_434; // @[Parameters.scala:137:31]
assign _atomics_legal_T_434 = _GEN_85; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_494; // @[Parameters.scala:137:31]
assign _atomics_legal_T_494 = _GEN_85; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_30 = {1'h0, _get_legal_T_29}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_31 = _get_legal_T_30 & 41'hFFEF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_32 = _get_legal_T_31; // @[Parameters.scala:137:46]
wire _get_legal_T_33 = _get_legal_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_86 = {s2_req_addr[39:28], s2_req_addr[27:0] ^ 28'h8000000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_34; // @[Parameters.scala:137:31]
assign _get_legal_T_34 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_34; // @[Parameters.scala:137:31]
assign _put_legal_T_34 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_34; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_34 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_24; // @[Parameters.scala:137:31]
assign _atomics_legal_T_24 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_84; // @[Parameters.scala:137:31]
assign _atomics_legal_T_84 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_144; // @[Parameters.scala:137:31]
assign _atomics_legal_T_144 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_204; // @[Parameters.scala:137:31]
assign _atomics_legal_T_204 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_264; // @[Parameters.scala:137:31]
assign _atomics_legal_T_264 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_324; // @[Parameters.scala:137:31]
assign _atomics_legal_T_324 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_384; // @[Parameters.scala:137:31]
assign _atomics_legal_T_384 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_444; // @[Parameters.scala:137:31]
assign _atomics_legal_T_444 = _GEN_86; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_504; // @[Parameters.scala:137:31]
assign _atomics_legal_T_504 = _GEN_86; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_35 = {1'h0, _get_legal_T_34}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_36 = _get_legal_T_35 & 41'hFFEF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_37 = _get_legal_T_36; // @[Parameters.scala:137:46]
wire _get_legal_T_38 = _get_legal_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_87 = {s2_req_addr[39:28], s2_req_addr[27:0] ^ 28'hC000000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_39; // @[Parameters.scala:137:31]
assign _get_legal_T_39 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_39; // @[Parameters.scala:137:31]
assign _put_legal_T_39 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_39; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_39 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_29; // @[Parameters.scala:137:31]
assign _atomics_legal_T_29 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_89; // @[Parameters.scala:137:31]
assign _atomics_legal_T_89 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_149; // @[Parameters.scala:137:31]
assign _atomics_legal_T_149 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_209; // @[Parameters.scala:137:31]
assign _atomics_legal_T_209 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_269; // @[Parameters.scala:137:31]
assign _atomics_legal_T_269 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_329; // @[Parameters.scala:137:31]
assign _atomics_legal_T_329 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_389; // @[Parameters.scala:137:31]
assign _atomics_legal_T_389 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_449; // @[Parameters.scala:137:31]
assign _atomics_legal_T_449 = _GEN_87; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_509; // @[Parameters.scala:137:31]
assign _atomics_legal_T_509 = _GEN_87; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_40 = {1'h0, _get_legal_T_39}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_41 = _get_legal_T_40 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_42 = _get_legal_T_41; // @[Parameters.scala:137:46]
wire _get_legal_T_43 = _get_legal_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_88 = {s2_req_addr[39:29], s2_req_addr[28:0] ^ 29'h10020000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_44; // @[Parameters.scala:137:31]
assign _get_legal_T_44 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_44; // @[Parameters.scala:137:31]
assign _put_legal_T_44 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_44; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_44 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_34; // @[Parameters.scala:137:31]
assign _atomics_legal_T_34 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_94; // @[Parameters.scala:137:31]
assign _atomics_legal_T_94 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_154; // @[Parameters.scala:137:31]
assign _atomics_legal_T_154 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_214; // @[Parameters.scala:137:31]
assign _atomics_legal_T_214 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_274; // @[Parameters.scala:137:31]
assign _atomics_legal_T_274 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_334; // @[Parameters.scala:137:31]
assign _atomics_legal_T_334 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_394; // @[Parameters.scala:137:31]
assign _atomics_legal_T_394 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_454; // @[Parameters.scala:137:31]
assign _atomics_legal_T_454 = _GEN_88; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_514; // @[Parameters.scala:137:31]
assign _atomics_legal_T_514 = _GEN_88; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_45 = {1'h0, _get_legal_T_44}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_46 = _get_legal_T_45 & 41'hFFEFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_47 = _get_legal_T_46; // @[Parameters.scala:137:46]
wire _get_legal_T_48 = _get_legal_T_47 == 41'h0; // @[Parameters.scala:137:{46,59}]
assign io_cpu_s2_paddr_0 = s2_req_addr[31:0]; // @[DCache.scala:101:7, :339:19]
wire [31:0] get_address = s2_req_addr[31:0]; // @[Edges.scala:460:17]
wire [31:0] put_address = s2_req_addr[31:0]; // @[Edges.scala:480:17]
wire [31:0] putpartial_address = s2_req_addr[31:0]; // @[Edges.scala:500:17]
wire [31:0] atomics_a_address = s2_req_addr[31:0]; // @[Edges.scala:534:17]
wire [31:0] atomics_a_1_address = s2_req_addr[31:0]; // @[Edges.scala:534:17]
wire [31:0] atomics_a_2_address = s2_req_addr[31:0]; // @[Edges.scala:534:17]
wire [31:0] atomics_a_3_address = s2_req_addr[31:0]; // @[Edges.scala:534:17]
wire [31:0] atomics_a_4_address = s2_req_addr[31:0]; // @[Edges.scala:517:17]
wire [31:0] atomics_a_5_address = s2_req_addr[31:0]; // @[Edges.scala:517:17]
wire [31:0] atomics_a_6_address = s2_req_addr[31:0]; // @[Edges.scala:517:17]
wire [31:0] atomics_a_7_address = s2_req_addr[31:0]; // @[Edges.scala:517:17]
wire [31:0] atomics_a_8_address = s2_req_addr[31:0]; // @[Edges.scala:517:17]
wire [39:0] _GEN_89 = {s2_req_addr[39:32], s2_req_addr[31:0] ^ 32'h80000000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_49; // @[Parameters.scala:137:31]
assign _get_legal_T_49 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_49; // @[Parameters.scala:137:31]
assign _put_legal_T_49 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_49; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_49 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_39; // @[Parameters.scala:137:31]
assign _atomics_legal_T_39 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_99; // @[Parameters.scala:137:31]
assign _atomics_legal_T_99 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_159; // @[Parameters.scala:137:31]
assign _atomics_legal_T_159 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_219; // @[Parameters.scala:137:31]
assign _atomics_legal_T_219 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_279; // @[Parameters.scala:137:31]
assign _atomics_legal_T_279 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_339; // @[Parameters.scala:137:31]
assign _atomics_legal_T_339 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_399; // @[Parameters.scala:137:31]
assign _atomics_legal_T_399 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_459; // @[Parameters.scala:137:31]
assign _atomics_legal_T_459 = _GEN_89; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_519; // @[Parameters.scala:137:31]
assign _atomics_legal_T_519 = _GEN_89; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_50 = {1'h0, _get_legal_T_49}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_51 = _get_legal_T_50 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_52 = _get_legal_T_51; // @[Parameters.scala:137:46]
wire _get_legal_T_53 = _get_legal_T_52 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _get_legal_T_54 = _get_legal_T_18 | _get_legal_T_23; // @[Parameters.scala:685:42]
wire _get_legal_T_55 = _get_legal_T_54 | _get_legal_T_28; // @[Parameters.scala:685:42]
wire _get_legal_T_56 = _get_legal_T_55 | _get_legal_T_33; // @[Parameters.scala:685:42]
wire _get_legal_T_57 = _get_legal_T_56 | _get_legal_T_38; // @[Parameters.scala:685:42]
wire _get_legal_T_58 = _get_legal_T_57 | _get_legal_T_43; // @[Parameters.scala:685:42]
wire _get_legal_T_59 = _get_legal_T_58 | _get_legal_T_48; // @[Parameters.scala:685:42]
wire _get_legal_T_60 = _get_legal_T_59 | _get_legal_T_53; // @[Parameters.scala:685:42]
wire _get_legal_T_61 = _get_legal_T_60; // @[Parameters.scala:684:54, :685:42]
wire [39:0] _GEN_90 = {s2_req_addr[39:18], s2_req_addr[17:0] ^ 18'h20000}; // @[DCache.scala:339:19]
wire [39:0] _get_legal_T_66; // @[Parameters.scala:137:31]
assign _get_legal_T_66 = _GEN_90; // @[Parameters.scala:137:31]
wire [39:0] _put_legal_T_73; // @[Parameters.scala:137:31]
assign _put_legal_T_73 = _GEN_90; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_73; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_73 = _GEN_90; // @[Parameters.scala:137:31]
wire [40:0] _get_legal_T_67 = {1'h0, _get_legal_T_66}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _get_legal_T_68 = _get_legal_T_67 & 41'hFFEF8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _get_legal_T_69 = _get_legal_T_68; // @[Parameters.scala:137:46]
wire _get_legal_T_70 = _get_legal_T_69 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _get_legal_T_71 = _get_legal_T_70; // @[Parameters.scala:684:54]
wire _get_legal_T_73 = _get_legal_T_72 | _get_legal_T_61; // @[Parameters.scala:684:54, :686:26]
wire get_legal = _get_legal_T_73 | _get_legal_T_71; // @[Parameters.scala:684:54, :686:26]
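  // Request size and mask wires for the uncached A-channel operations; the size for Get, Put,
  // PutPartial, and the atomics is s2_req_size zero-extended.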
wire [7:0] _get_a_mask_T; // @[Misc.scala:222:10]
wire [3:0] get_size; // @[Edges.scala:460:17]
wire [7:0] get_mask; // @[Edges.scala:460:17]
wire [3:0] _GEN_91 = {2'h0, s2_req_size}; // @[Edges.scala:463:15]
assign get_size = _GEN_91; // @[Edges.scala:460:17, :463:15]
wire [3:0] put_size; // @[Edges.scala:480:17]
assign put_size = _GEN_91; // @[Edges.scala:463:15, :480:17]
wire [3:0] putpartial_size; // @[Edges.scala:500:17]
assign putpartial_size = _GEN_91; // @[Edges.scala:463:15, :500:17]
wire [3:0] atomics_a_size; // @[Edges.scala:534:17]
assign atomics_a_size = _GEN_91; // @[Edges.scala:463:15, :534:17]
wire [3:0] atomics_a_1_size; // @[Edges.scala:534:17]
assign atomics_a_1_size = _GEN_91; // @[Edges.scala:463:15, :534:17]
wire [3:0] atomics_a_2_size; // @[Edges.scala:534:17]
assign atomics_a_2_size = _GEN_91; // @[Edges.scala:463:15, :534:17]
wire [3:0] atomics_a_3_size; // @[Edges.scala:534:17]
assign atomics_a_3_size = _GEN_91; // @[Edges.scala:463:15, :534:17]
wire [3:0] atomics_a_4_size; // @[Edges.scala:517:17]
assign atomics_a_4_size = _GEN_91; // @[Edges.scala:463:15, :517:17]
wire [3:0] atomics_a_5_size; // @[Edges.scala:517:17]
assign atomics_a_5_size = _GEN_91; // @[Edges.scala:463:15, :517:17]
wire [3:0] atomics_a_6_size; // @[Edges.scala:517:17]
assign atomics_a_6_size = _GEN_91; // @[Edges.scala:463:15, :517:17]
wire [3:0] atomics_a_7_size; // @[Edges.scala:517:17]
assign atomics_a_7_size = _GEN_91; // @[Edges.scala:463:15, :517:17]
wire [3:0] atomics_a_8_size; // @[Edges.scala:517:17]
assign atomics_a_8_size = _GEN_91; // @[Edges.scala:463:15, :517:17]
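  // get_mask: TileLink byte-mask generation from the size one-hot and the low address bits
  // (the Misc.scala mask tree); the same structure is reused for the put/atomic masks below.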
wire [2:0] _GEN_92 = {1'h0, s2_req_size}; // @[Misc.scala:202:34]
wire [2:0] _get_a_mask_sizeOH_T; // @[Misc.scala:202:34]
assign _get_a_mask_sizeOH_T = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _put_a_mask_sizeOH_T; // @[Misc.scala:202:34]
assign _put_a_mask_sizeOH_T = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_3; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_3 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_6; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_6 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_9; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_9 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_12; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_12 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_15; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_15 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_18; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_18 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_21; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_21 = _GEN_92; // @[Misc.scala:202:34]
wire [2:0] _atomics_a_mask_sizeOH_T_24; // @[Misc.scala:202:34]
assign _atomics_a_mask_sizeOH_T_24 = _GEN_92; // @[Misc.scala:202:34]
wire [1:0] get_a_mask_sizeOH_shiftAmount = _get_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _get_a_mask_sizeOH_T_1 = 4'h1 << get_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _get_a_mask_sizeOH_T_2 = _get_a_mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] get_a_mask_sizeOH = {_get_a_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire get_a_mask_sub_sub_sub_0_1 = &s2_req_size; // @[Misc.scala:206:21]
wire get_a_mask_sub_sub_size = get_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire get_a_mask_sub_sub_bit = s2_req_addr[2]; // @[Misc.scala:210:26]
wire put_a_mask_sub_sub_bit = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_1 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_2 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_3 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_4 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_5 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_6 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_7 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_sub_bit_8 = s2_req_addr[2]; // @[Misc.scala:210:26]
wire _io_cpu_resp_bits_data_shifted_T = s2_req_addr[2]; // @[Misc.scala:210:26]
wire _io_cpu_resp_bits_data_word_bypass_shifted_T = s2_req_addr[2]; // @[Misc.scala:210:26]
wire get_a_mask_sub_sub_1_2 = get_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire get_a_mask_sub_sub_nbit = ~get_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire get_a_mask_sub_sub_0_2 = get_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_sub_sub_acc_T = get_a_mask_sub_sub_size & get_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_sub_sub_0_1 = get_a_mask_sub_sub_sub_0_1 | _get_a_mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _get_a_mask_sub_sub_acc_T_1 = get_a_mask_sub_sub_size & get_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_sub_sub_1_1 = get_a_mask_sub_sub_sub_0_1 | _get_a_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire get_a_mask_sub_size = get_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire get_a_mask_sub_bit = s2_req_addr[1]; // @[Misc.scala:210:26]
wire put_a_mask_sub_bit = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_1 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_2 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_3 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_4 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_5 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_6 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_7 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire atomics_a_mask_sub_bit_8 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire _io_cpu_resp_bits_data_shifted_T_3 = s2_req_addr[1]; // @[Misc.scala:210:26]
wire get_a_mask_sub_nbit = ~get_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire get_a_mask_sub_0_2 = get_a_mask_sub_sub_0_2 & get_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_sub_acc_T = get_a_mask_sub_size & get_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_sub_0_1 = get_a_mask_sub_sub_0_1 | _get_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire get_a_mask_sub_1_2 = get_a_mask_sub_sub_0_2 & get_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _get_a_mask_sub_acc_T_1 = get_a_mask_sub_size & get_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_sub_1_1 = get_a_mask_sub_sub_0_1 | _get_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire get_a_mask_sub_2_2 = get_a_mask_sub_sub_1_2 & get_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_sub_acc_T_2 = get_a_mask_sub_size & get_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_sub_2_1 = get_a_mask_sub_sub_1_1 | _get_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire get_a_mask_sub_3_2 = get_a_mask_sub_sub_1_2 & get_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _get_a_mask_sub_acc_T_3 = get_a_mask_sub_size & get_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_sub_3_1 = get_a_mask_sub_sub_1_1 | _get_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire get_a_mask_size = get_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire get_a_mask_bit = s2_req_addr[0]; // @[Misc.scala:210:26]
wire put_a_mask_bit = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_1 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_2 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_3 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_4 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_5 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_6 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_7 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire atomics_a_mask_bit_8 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire _io_cpu_resp_bits_data_shifted_T_6 = s2_req_addr[0]; // @[Misc.scala:210:26]
wire get_a_mask_nbit = ~get_a_mask_bit; // @[Misc.scala:210:26, :211:20]
wire get_a_mask_eq = get_a_mask_sub_0_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_acc_T = get_a_mask_size & get_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc = get_a_mask_sub_0_1 | _get_a_mask_acc_T; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_1 = get_a_mask_sub_0_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _get_a_mask_acc_T_1 = get_a_mask_size & get_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_1 = get_a_mask_sub_0_1 | _get_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_2 = get_a_mask_sub_1_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_acc_T_2 = get_a_mask_size & get_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_2 = get_a_mask_sub_1_1 | _get_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_3 = get_a_mask_sub_1_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _get_a_mask_acc_T_3 = get_a_mask_size & get_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_3 = get_a_mask_sub_1_1 | _get_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_4 = get_a_mask_sub_2_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_acc_T_4 = get_a_mask_size & get_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_4 = get_a_mask_sub_2_1 | _get_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_5 = get_a_mask_sub_2_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _get_a_mask_acc_T_5 = get_a_mask_size & get_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_5 = get_a_mask_sub_2_1 | _get_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_6 = get_a_mask_sub_3_2 & get_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _get_a_mask_acc_T_6 = get_a_mask_size & get_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_6 = get_a_mask_sub_3_1 | _get_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire get_a_mask_eq_7 = get_a_mask_sub_3_2 & get_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _get_a_mask_acc_T_7 = get_a_mask_size & get_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire get_a_mask_acc_7 = get_a_mask_sub_3_1 | _get_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] get_a_mask_lo_lo = {get_a_mask_acc_1, get_a_mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] get_a_mask_lo_hi = {get_a_mask_acc_3, get_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] get_a_mask_lo = {get_a_mask_lo_hi, get_a_mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] get_a_mask_hi_lo = {get_a_mask_acc_5, get_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] get_a_mask_hi_hi = {get_a_mask_acc_7, get_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] get_a_mask_hi = {get_a_mask_hi_hi, get_a_mask_hi_lo}; // @[Misc.scala:222:10]
assign _get_a_mask_T = {get_a_mask_hi, get_a_mask_lo}; // @[Misc.scala:222:10]
assign get_mask = _get_a_mask_T; // @[Misc.scala:222:10]
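  // PutFullData address legality: s2_req_addr is checked against each slave address
  // range of the address map (Parameters.scala:685-686); the 41-bit constants appear
  // to be the per-range address-match masks emitted by the generator.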
wire [40:0] _put_legal_T_5 = {1'h0, _put_legal_T_4}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_6 = _put_legal_T_5 & 41'hFFFFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_7 = _put_legal_T_6; // @[Parameters.scala:137:46]
wire _put_legal_T_8 = _put_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _put_legal_T_9 = _put_legal_T_8; // @[Parameters.scala:684:54]
wire _put_legal_T_79 = _put_legal_T_9; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _put_legal_T_15 = {1'h0, _put_legal_T_14}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_16 = _put_legal_T_15 & 41'hFFFFA000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_17 = _put_legal_T_16; // @[Parameters.scala:137:46]
wire _put_legal_T_18 = _put_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_93 = {s2_req_addr[39:21], s2_req_addr[20:0] ^ 21'h100000}; // @[DCache.scala:339:19]
wire [39:0] _put_legal_T_19; // @[Parameters.scala:137:31]
assign _put_legal_T_19 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_19; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_19 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_9; // @[Parameters.scala:137:31]
assign _atomics_legal_T_9 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_69; // @[Parameters.scala:137:31]
assign _atomics_legal_T_69 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_129; // @[Parameters.scala:137:31]
assign _atomics_legal_T_129 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_189; // @[Parameters.scala:137:31]
assign _atomics_legal_T_189 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_249; // @[Parameters.scala:137:31]
assign _atomics_legal_T_249 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_309; // @[Parameters.scala:137:31]
assign _atomics_legal_T_309 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_369; // @[Parameters.scala:137:31]
assign _atomics_legal_T_369 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_429; // @[Parameters.scala:137:31]
assign _atomics_legal_T_429 = _GEN_93; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_489; // @[Parameters.scala:137:31]
assign _atomics_legal_T_489 = _GEN_93; // @[Parameters.scala:137:31]
wire [40:0] _put_legal_T_20 = {1'h0, _put_legal_T_19}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_21 = _put_legal_T_20 & 41'hFFFEB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_22 = _put_legal_T_21; // @[Parameters.scala:137:46]
wire _put_legal_T_23 = _put_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _put_legal_T_25 = {1'h0, _put_legal_T_24}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_26 = _put_legal_T_25 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_27 = _put_legal_T_26; // @[Parameters.scala:137:46]
wire _put_legal_T_28 = _put_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _GEN_94 = {s2_req_addr[39:26], s2_req_addr[25:0] ^ 26'h2010000}; // @[DCache.scala:339:19]
wire [39:0] _put_legal_T_29; // @[Parameters.scala:137:31]
assign _put_legal_T_29 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _putpartial_legal_T_29; // @[Parameters.scala:137:31]
assign _putpartial_legal_T_29 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_19; // @[Parameters.scala:137:31]
assign _atomics_legal_T_19 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_79; // @[Parameters.scala:137:31]
assign _atomics_legal_T_79 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_139; // @[Parameters.scala:137:31]
assign _atomics_legal_T_139 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_199; // @[Parameters.scala:137:31]
assign _atomics_legal_T_199 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_259; // @[Parameters.scala:137:31]
assign _atomics_legal_T_259 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_319; // @[Parameters.scala:137:31]
assign _atomics_legal_T_319 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_379; // @[Parameters.scala:137:31]
assign _atomics_legal_T_379 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_439; // @[Parameters.scala:137:31]
assign _atomics_legal_T_439 = _GEN_94; // @[Parameters.scala:137:31]
wire [39:0] _atomics_legal_T_499; // @[Parameters.scala:137:31]
assign _atomics_legal_T_499 = _GEN_94; // @[Parameters.scala:137:31]
wire [40:0] _put_legal_T_30 = {1'h0, _put_legal_T_29}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_31 = _put_legal_T_30 & 41'hFFFFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_32 = _put_legal_T_31; // @[Parameters.scala:137:46]
wire _put_legal_T_33 = _put_legal_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _put_legal_T_35 = {1'h0, _put_legal_T_34}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_36 = _put_legal_T_35 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_37 = _put_legal_T_36; // @[Parameters.scala:137:46]
wire _put_legal_T_38 = _put_legal_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _put_legal_T_40 = {1'h0, _put_legal_T_39}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_41 = _put_legal_T_40 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_42 = _put_legal_T_41; // @[Parameters.scala:137:46]
wire _put_legal_T_43 = _put_legal_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _put_legal_T_45 = {1'h0, _put_legal_T_44}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_46 = _put_legal_T_45 & 41'hFFFFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_47 = _put_legal_T_46; // @[Parameters.scala:137:46]
wire _put_legal_T_48 = _put_legal_T_47 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _put_legal_T_50 = {1'h0, _put_legal_T_49}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_51 = _put_legal_T_50 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_52 = _put_legal_T_51; // @[Parameters.scala:137:46]
wire _put_legal_T_53 = _put_legal_T_52 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _put_legal_T_54 = _put_legal_T_18 | _put_legal_T_23; // @[Parameters.scala:685:42]
wire _put_legal_T_55 = _put_legal_T_54 | _put_legal_T_28; // @[Parameters.scala:685:42]
wire _put_legal_T_56 = _put_legal_T_55 | _put_legal_T_33; // @[Parameters.scala:685:42]
wire _put_legal_T_57 = _put_legal_T_56 | _put_legal_T_38; // @[Parameters.scala:685:42]
wire _put_legal_T_58 = _put_legal_T_57 | _put_legal_T_43; // @[Parameters.scala:685:42]
wire _put_legal_T_59 = _put_legal_T_58 | _put_legal_T_48; // @[Parameters.scala:685:42]
wire _put_legal_T_60 = _put_legal_T_59 | _put_legal_T_53; // @[Parameters.scala:685:42]
wire _put_legal_T_61 = _put_legal_T_60; // @[Parameters.scala:684:54, :685:42]
wire [40:0] _put_legal_T_64 = {1'h0, _put_legal_T_63}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_65 = _put_legal_T_64 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_66 = _put_legal_T_65; // @[Parameters.scala:137:46]
wire _put_legal_T_67 = _put_legal_T_66 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _put_legal_T_74 = {1'h0, _put_legal_T_73}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _put_legal_T_75 = _put_legal_T_74 & 41'hFFFF8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _put_legal_T_76 = _put_legal_T_75; // @[Parameters.scala:137:46]
wire _put_legal_T_77 = _put_legal_T_76 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _put_legal_T_78 = _put_legal_T_77; // @[Parameters.scala:684:54]
wire _put_legal_T_80 = _put_legal_T_79 | _put_legal_T_61; // @[Parameters.scala:684:54, :686:26]
wire _put_legal_T_81 = _put_legal_T_80; // @[Parameters.scala:686:26]
wire put_legal = _put_legal_T_81 | _put_legal_T_78; // @[Parameters.scala:684:54, :686:26]
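  // PutFullData byte strobe: the maskGen tree (Misc.scala:202-222) expands s2_req_addr
  // and s2_req_size into the 8-bit lane mask for the 8-byte data bus (Edges.scala:480).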
wire [7:0] _put_a_mask_T; // @[Misc.scala:222:10]
wire [7:0] put_mask; // @[Edges.scala:480:17]
wire [1:0] put_a_mask_sizeOH_shiftAmount = _put_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _put_a_mask_sizeOH_T_1 = 4'h1 << put_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _put_a_mask_sizeOH_T_2 = _put_a_mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] put_a_mask_sizeOH = {_put_a_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire put_a_mask_sub_sub_sub_0_1 = &s2_req_size; // @[Misc.scala:206:21]
wire put_a_mask_sub_sub_size = put_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire put_a_mask_sub_sub_1_2 = put_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire put_a_mask_sub_sub_nbit = ~put_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire put_a_mask_sub_sub_0_2 = put_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_sub_sub_acc_T = put_a_mask_sub_sub_size & put_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_sub_sub_0_1 = put_a_mask_sub_sub_sub_0_1 | _put_a_mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _put_a_mask_sub_sub_acc_T_1 = put_a_mask_sub_sub_size & put_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_sub_sub_1_1 = put_a_mask_sub_sub_sub_0_1 | _put_a_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire put_a_mask_sub_size = put_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire put_a_mask_sub_nbit = ~put_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire put_a_mask_sub_0_2 = put_a_mask_sub_sub_0_2 & put_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_sub_acc_T = put_a_mask_sub_size & put_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_sub_0_1 = put_a_mask_sub_sub_0_1 | _put_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire put_a_mask_sub_1_2 = put_a_mask_sub_sub_0_2 & put_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _put_a_mask_sub_acc_T_1 = put_a_mask_sub_size & put_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_sub_1_1 = put_a_mask_sub_sub_0_1 | _put_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire put_a_mask_sub_2_2 = put_a_mask_sub_sub_1_2 & put_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_sub_acc_T_2 = put_a_mask_sub_size & put_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_sub_2_1 = put_a_mask_sub_sub_1_1 | _put_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire put_a_mask_sub_3_2 = put_a_mask_sub_sub_1_2 & put_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _put_a_mask_sub_acc_T_3 = put_a_mask_sub_size & put_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_sub_3_1 = put_a_mask_sub_sub_1_1 | _put_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire put_a_mask_size = put_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire put_a_mask_nbit = ~put_a_mask_bit; // @[Misc.scala:210:26, :211:20]
wire put_a_mask_eq = put_a_mask_sub_0_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_acc_T = put_a_mask_size & put_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc = put_a_mask_sub_0_1 | _put_a_mask_acc_T; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_1 = put_a_mask_sub_0_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _put_a_mask_acc_T_1 = put_a_mask_size & put_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_1 = put_a_mask_sub_0_1 | _put_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_2 = put_a_mask_sub_1_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_acc_T_2 = put_a_mask_size & put_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_2 = put_a_mask_sub_1_1 | _put_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_3 = put_a_mask_sub_1_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _put_a_mask_acc_T_3 = put_a_mask_size & put_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_3 = put_a_mask_sub_1_1 | _put_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_4 = put_a_mask_sub_2_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_acc_T_4 = put_a_mask_size & put_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_4 = put_a_mask_sub_2_1 | _put_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_5 = put_a_mask_sub_2_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _put_a_mask_acc_T_5 = put_a_mask_size & put_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_5 = put_a_mask_sub_2_1 | _put_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_6 = put_a_mask_sub_3_2 & put_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _put_a_mask_acc_T_6 = put_a_mask_size & put_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_6 = put_a_mask_sub_3_1 | _put_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire put_a_mask_eq_7 = put_a_mask_sub_3_2 & put_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _put_a_mask_acc_T_7 = put_a_mask_size & put_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire put_a_mask_acc_7 = put_a_mask_sub_3_1 | _put_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] put_a_mask_lo_lo = {put_a_mask_acc_1, put_a_mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] put_a_mask_lo_hi = {put_a_mask_acc_3, put_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] put_a_mask_lo = {put_a_mask_lo_hi, put_a_mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] put_a_mask_hi_lo = {put_a_mask_acc_5, put_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] put_a_mask_hi_hi = {put_a_mask_acc_7, put_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] put_a_mask_hi = {put_a_mask_hi_hi, put_a_mask_hi_lo}; // @[Misc.scala:222:10]
assign _put_a_mask_T = {put_a_mask_hi, put_a_mask_lo}; // @[Misc.scala:222:10]
assign put_mask = _put_a_mask_T; // @[Misc.scala:222:10]
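  // PutPartialData address legality: the same address-map decode as the full put,
  // replicated by the generator for the partial-write opcode.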
wire [40:0] _putpartial_legal_T_5 = {1'h0, _putpartial_legal_T_4}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_6 = _putpartial_legal_T_5 & 41'hFFFFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_7 = _putpartial_legal_T_6; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_8 = _putpartial_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _putpartial_legal_T_9 = _putpartial_legal_T_8; // @[Parameters.scala:684:54]
wire _putpartial_legal_T_79 = _putpartial_legal_T_9; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _putpartial_legal_T_15 = {1'h0, _putpartial_legal_T_14}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_16 = _putpartial_legal_T_15 & 41'hFFFFA000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_17 = _putpartial_legal_T_16; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_18 = _putpartial_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_20 = {1'h0, _putpartial_legal_T_19}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_21 = _putpartial_legal_T_20 & 41'hFFFEB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_22 = _putpartial_legal_T_21; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_23 = _putpartial_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_25 = {1'h0, _putpartial_legal_T_24}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_26 = _putpartial_legal_T_25 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_27 = _putpartial_legal_T_26; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_28 = _putpartial_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_30 = {1'h0, _putpartial_legal_T_29}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_31 = _putpartial_legal_T_30 & 41'hFFFFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_32 = _putpartial_legal_T_31; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_33 = _putpartial_legal_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_35 = {1'h0, _putpartial_legal_T_34}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_36 = _putpartial_legal_T_35 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_37 = _putpartial_legal_T_36; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_38 = _putpartial_legal_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_40 = {1'h0, _putpartial_legal_T_39}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_41 = _putpartial_legal_T_40 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_42 = _putpartial_legal_T_41; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_43 = _putpartial_legal_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_45 = {1'h0, _putpartial_legal_T_44}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_46 = _putpartial_legal_T_45 & 41'hFFFFB000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_47 = _putpartial_legal_T_46; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_48 = _putpartial_legal_T_47 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_50 = {1'h0, _putpartial_legal_T_49}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_51 = _putpartial_legal_T_50 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_52 = _putpartial_legal_T_51; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_53 = _putpartial_legal_T_52 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _putpartial_legal_T_54 = _putpartial_legal_T_18 | _putpartial_legal_T_23; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_55 = _putpartial_legal_T_54 | _putpartial_legal_T_28; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_56 = _putpartial_legal_T_55 | _putpartial_legal_T_33; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_57 = _putpartial_legal_T_56 | _putpartial_legal_T_38; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_58 = _putpartial_legal_T_57 | _putpartial_legal_T_43; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_59 = _putpartial_legal_T_58 | _putpartial_legal_T_48; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_60 = _putpartial_legal_T_59 | _putpartial_legal_T_53; // @[Parameters.scala:685:42]
wire _putpartial_legal_T_61 = _putpartial_legal_T_60; // @[Parameters.scala:684:54, :685:42]
wire [40:0] _putpartial_legal_T_64 = {1'h0, _putpartial_legal_T_63}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_65 = _putpartial_legal_T_64 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_66 = _putpartial_legal_T_65; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_67 = _putpartial_legal_T_66 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _putpartial_legal_T_74 = {1'h0, _putpartial_legal_T_73}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _putpartial_legal_T_75 = _putpartial_legal_T_74 & 41'hFFFF8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _putpartial_legal_T_76 = _putpartial_legal_T_75; // @[Parameters.scala:137:46]
wire _putpartial_legal_T_77 = _putpartial_legal_T_76 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _putpartial_legal_T_78 = _putpartial_legal_T_77; // @[Parameters.scala:684:54]
wire _putpartial_legal_T_80 = _putpartial_legal_T_79 | _putpartial_legal_T_61; // @[Parameters.scala:684:54, :686:26]
wire _putpartial_legal_T_81 = _putpartial_legal_T_80; // @[Parameters.scala:686:26]
wire putpartial_legal = _putpartial_legal_T_81 | _putpartial_legal_T_78; // @[Parameters.scala:684:54, :686:26]
wire [7:0] putpartial_mask; // @[Edges.scala:500:17]
assign putpartial_mask = a_mask[7:0]; // @[Edges.scala:500:17, :508:15]
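  // The partial-put strobe is taken directly from the store mask (Edges.scala:508)
  // rather than regenerated. The blocks below repeat the legality decode and byte-mask
  // generation once per atomic (AMO) request variant.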
wire [40:0] _atomics_legal_T_5 = {1'h0, _atomics_legal_T_4}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_6 = _atomics_legal_T_5 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_7 = _atomics_legal_T_6; // @[Parameters.scala:137:46]
wire _atomics_legal_T_8 = _atomics_legal_T_7 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_10 = {1'h0, _atomics_legal_T_9}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_11 = _atomics_legal_T_10 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_12 = _atomics_legal_T_11; // @[Parameters.scala:137:46]
wire _atomics_legal_T_13 = _atomics_legal_T_12 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_15 = {1'h0, _atomics_legal_T_14}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_16 = _atomics_legal_T_15 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_17 = _atomics_legal_T_16; // @[Parameters.scala:137:46]
wire _atomics_legal_T_18 = _atomics_legal_T_17 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_20 = {1'h0, _atomics_legal_T_19}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_21 = _atomics_legal_T_20 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_22 = _atomics_legal_T_21; // @[Parameters.scala:137:46]
wire _atomics_legal_T_23 = _atomics_legal_T_22 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_25 = {1'h0, _atomics_legal_T_24}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_26 = _atomics_legal_T_25 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_27 = _atomics_legal_T_26; // @[Parameters.scala:137:46]
wire _atomics_legal_T_28 = _atomics_legal_T_27 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_30 = {1'h0, _atomics_legal_T_29}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_31 = _atomics_legal_T_30 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_32 = _atomics_legal_T_31; // @[Parameters.scala:137:46]
wire _atomics_legal_T_33 = _atomics_legal_T_32 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_35 = {1'h0, _atomics_legal_T_34}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_36 = _atomics_legal_T_35 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_37 = _atomics_legal_T_36; // @[Parameters.scala:137:46]
wire _atomics_legal_T_38 = _atomics_legal_T_37 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_40 = {1'h0, _atomics_legal_T_39}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_41 = _atomics_legal_T_40 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_42 = _atomics_legal_T_41; // @[Parameters.scala:137:46]
wire _atomics_legal_T_43 = _atomics_legal_T_42 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_44 = _atomics_legal_T_8 | _atomics_legal_T_13; // @[Parameters.scala:685:42]
wire _atomics_legal_T_45 = _atomics_legal_T_44 | _atomics_legal_T_18; // @[Parameters.scala:685:42]
wire _atomics_legal_T_46 = _atomics_legal_T_45 | _atomics_legal_T_23; // @[Parameters.scala:685:42]
wire _atomics_legal_T_47 = _atomics_legal_T_46 | _atomics_legal_T_28; // @[Parameters.scala:685:42]
wire _atomics_legal_T_48 = _atomics_legal_T_47 | _atomics_legal_T_33; // @[Parameters.scala:685:42]
wire _atomics_legal_T_49 = _atomics_legal_T_48 | _atomics_legal_T_38; // @[Parameters.scala:685:42]
wire _atomics_legal_T_50 = _atomics_legal_T_49 | _atomics_legal_T_43; // @[Parameters.scala:685:42]
wire _atomics_legal_T_51 = _atomics_legal_T_50; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_59 = _atomics_legal_T_51; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_54 = {1'h0, _atomics_legal_T_53}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_55 = _atomics_legal_T_54 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_56 = _atomics_legal_T_55; // @[Parameters.scala:137:46]
wire _atomics_legal_T_57 = _atomics_legal_T_56 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal = _atomics_legal_T_59; // @[Parameters.scala:686:26]
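  // AMO variant 0 byte strobe (Edges.scala:534): same maskGen expansion of
  // s2_req_addr/s2_req_size as the put path.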
wire [7:0] _atomics_a_mask_T; // @[Misc.scala:222:10]
wire [7:0] atomics_a_mask; // @[Edges.scala:534:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount = _atomics_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_1 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_2 = _atomics_a_mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH = {_atomics_a_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size = atomics_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2 = atomics_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit = ~atomics_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2 = atomics_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T = atomics_a_mask_sub_sub_size & atomics_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1 = atomics_a_mask_sub_sub_sub_0_1 | _atomics_a_mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_1 = atomics_a_mask_sub_sub_size & atomics_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1 = atomics_a_mask_sub_sub_sub_0_1 | _atomics_a_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size = atomics_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit = ~atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2 = atomics_a_mask_sub_sub_0_2 & atomics_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T = atomics_a_mask_sub_size & atomics_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1 = atomics_a_mask_sub_sub_0_1 | _atomics_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2 = atomics_a_mask_sub_sub_0_2 & atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_1 = atomics_a_mask_sub_size & atomics_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1 = atomics_a_mask_sub_sub_0_1 | _atomics_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2 = atomics_a_mask_sub_sub_1_2 & atomics_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_2 = atomics_a_mask_sub_size & atomics_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1 = atomics_a_mask_sub_sub_1_1 | _atomics_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2 = atomics_a_mask_sub_sub_1_2 & atomics_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_3 = atomics_a_mask_sub_size & atomics_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1 = atomics_a_mask_sub_sub_1_1 | _atomics_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size = atomics_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit = ~atomics_a_mask_bit; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq = atomics_a_mask_sub_0_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T = atomics_a_mask_size & atomics_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc = atomics_a_mask_sub_0_1 | _atomics_a_mask_acc_T; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_1 = atomics_a_mask_sub_0_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_1 = atomics_a_mask_size & atomics_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_1 = atomics_a_mask_sub_0_1 | _atomics_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_2 = atomics_a_mask_sub_1_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_2 = atomics_a_mask_size & atomics_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_2 = atomics_a_mask_sub_1_1 | _atomics_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_3 = atomics_a_mask_sub_1_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_3 = atomics_a_mask_size & atomics_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_3 = atomics_a_mask_sub_1_1 | _atomics_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_4 = atomics_a_mask_sub_2_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_4 = atomics_a_mask_size & atomics_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_4 = atomics_a_mask_sub_2_1 | _atomics_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_5 = atomics_a_mask_sub_2_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_5 = atomics_a_mask_size & atomics_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_5 = atomics_a_mask_sub_2_1 | _atomics_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_6 = atomics_a_mask_sub_3_2 & atomics_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_6 = atomics_a_mask_size & atomics_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_6 = atomics_a_mask_sub_3_1 | _atomics_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_7 = atomics_a_mask_sub_3_2 & atomics_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_7 = atomics_a_mask_size & atomics_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_7 = atomics_a_mask_sub_3_1 | _atomics_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo = {atomics_a_mask_acc_1, atomics_a_mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi = {atomics_a_mask_acc_3, atomics_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo = {atomics_a_mask_lo_hi, atomics_a_mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo = {atomics_a_mask_acc_5, atomics_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi = {atomics_a_mask_acc_7, atomics_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi = {atomics_a_mask_hi_hi, atomics_a_mask_hi_lo}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T = {atomics_a_mask_hi, atomics_a_mask_lo}; // @[Misc.scala:222:10]
assign atomics_a_mask = _atomics_a_mask_T; // @[Misc.scala:222:10]
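  // AMO variant 1: identical legality decode and byte-strobe generation
  // (atomics_legal_1, atomics_a_1_mask); only the operand temporaries differ.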
wire [40:0] _atomics_legal_T_65 = {1'h0, _atomics_legal_T_64}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_66 = _atomics_legal_T_65 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_67 = _atomics_legal_T_66; // @[Parameters.scala:137:46]
wire _atomics_legal_T_68 = _atomics_legal_T_67 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_70 = {1'h0, _atomics_legal_T_69}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_71 = _atomics_legal_T_70 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_72 = _atomics_legal_T_71; // @[Parameters.scala:137:46]
wire _atomics_legal_T_73 = _atomics_legal_T_72 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_75 = {1'h0, _atomics_legal_T_74}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_76 = _atomics_legal_T_75 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_77 = _atomics_legal_T_76; // @[Parameters.scala:137:46]
wire _atomics_legal_T_78 = _atomics_legal_T_77 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_80 = {1'h0, _atomics_legal_T_79}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_81 = _atomics_legal_T_80 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_82 = _atomics_legal_T_81; // @[Parameters.scala:137:46]
wire _atomics_legal_T_83 = _atomics_legal_T_82 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_85 = {1'h0, _atomics_legal_T_84}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_86 = _atomics_legal_T_85 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_87 = _atomics_legal_T_86; // @[Parameters.scala:137:46]
wire _atomics_legal_T_88 = _atomics_legal_T_87 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_90 = {1'h0, _atomics_legal_T_89}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_91 = _atomics_legal_T_90 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_92 = _atomics_legal_T_91; // @[Parameters.scala:137:46]
wire _atomics_legal_T_93 = _atomics_legal_T_92 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_95 = {1'h0, _atomics_legal_T_94}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_96 = _atomics_legal_T_95 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_97 = _atomics_legal_T_96; // @[Parameters.scala:137:46]
wire _atomics_legal_T_98 = _atomics_legal_T_97 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_100 = {1'h0, _atomics_legal_T_99}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_101 = _atomics_legal_T_100 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_102 = _atomics_legal_T_101; // @[Parameters.scala:137:46]
wire _atomics_legal_T_103 = _atomics_legal_T_102 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_104 = _atomics_legal_T_68 | _atomics_legal_T_73; // @[Parameters.scala:685:42]
wire _atomics_legal_T_105 = _atomics_legal_T_104 | _atomics_legal_T_78; // @[Parameters.scala:685:42]
wire _atomics_legal_T_106 = _atomics_legal_T_105 | _atomics_legal_T_83; // @[Parameters.scala:685:42]
wire _atomics_legal_T_107 = _atomics_legal_T_106 | _atomics_legal_T_88; // @[Parameters.scala:685:42]
wire _atomics_legal_T_108 = _atomics_legal_T_107 | _atomics_legal_T_93; // @[Parameters.scala:685:42]
wire _atomics_legal_T_109 = _atomics_legal_T_108 | _atomics_legal_T_98; // @[Parameters.scala:685:42]
wire _atomics_legal_T_110 = _atomics_legal_T_109 | _atomics_legal_T_103; // @[Parameters.scala:685:42]
wire _atomics_legal_T_111 = _atomics_legal_T_110; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_119 = _atomics_legal_T_111; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_114 = {1'h0, _atomics_legal_T_113}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_115 = _atomics_legal_T_114 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_116 = _atomics_legal_T_115; // @[Parameters.scala:137:46]
wire _atomics_legal_T_117 = _atomics_legal_T_116 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_1 = _atomics_legal_T_119; // @[Parameters.scala:686:26]
wire [7:0] _atomics_a_mask_T_1; // @[Misc.scala:222:10]
wire [7:0] atomics_a_1_mask; // @[Edges.scala:534:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_1 = _atomics_a_mask_sizeOH_T_3[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_4 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_1; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_5 = _atomics_a_mask_sizeOH_T_4[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_1 = {_atomics_a_mask_sizeOH_T_5[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_1 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_1 = atomics_a_mask_sizeOH_1[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_1 = atomics_a_mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_1 = ~atomics_a_mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_1 = atomics_a_mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_2 = atomics_a_mask_sub_sub_size_1 & atomics_a_mask_sub_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_1 = atomics_a_mask_sub_sub_sub_0_1_1 | _atomics_a_mask_sub_sub_acc_T_2; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_3 = atomics_a_mask_sub_sub_size_1 & atomics_a_mask_sub_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_1 = atomics_a_mask_sub_sub_sub_0_1_1 | _atomics_a_mask_sub_sub_acc_T_3; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_1 = atomics_a_mask_sizeOH_1[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_1 = ~atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_1 = atomics_a_mask_sub_sub_0_2_1 & atomics_a_mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_4 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_1 = atomics_a_mask_sub_sub_0_1_1 | _atomics_a_mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_1 = atomics_a_mask_sub_sub_0_2_1 & atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_5 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_1 = atomics_a_mask_sub_sub_0_1_1 | _atomics_a_mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_1 = atomics_a_mask_sub_sub_1_2_1 & atomics_a_mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_6 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_1 = atomics_a_mask_sub_sub_1_1_1 | _atomics_a_mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_1 = atomics_a_mask_sub_sub_1_2_1 & atomics_a_mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_7 = atomics_a_mask_sub_size_1 & atomics_a_mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_1 = atomics_a_mask_sub_sub_1_1_1 | _atomics_a_mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_1 = atomics_a_mask_sizeOH_1[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_1 = ~atomics_a_mask_bit_1; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_8 = atomics_a_mask_sub_0_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_8 = atomics_a_mask_size_1 & atomics_a_mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_8 = atomics_a_mask_sub_0_1_1 | _atomics_a_mask_acc_T_8; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_9 = atomics_a_mask_sub_0_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_9 = atomics_a_mask_size_1 & atomics_a_mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_9 = atomics_a_mask_sub_0_1_1 | _atomics_a_mask_acc_T_9; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_10 = atomics_a_mask_sub_1_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_10 = atomics_a_mask_size_1 & atomics_a_mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_10 = atomics_a_mask_sub_1_1_1 | _atomics_a_mask_acc_T_10; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_11 = atomics_a_mask_sub_1_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_11 = atomics_a_mask_size_1 & atomics_a_mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_11 = atomics_a_mask_sub_1_1_1 | _atomics_a_mask_acc_T_11; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_12 = atomics_a_mask_sub_2_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_12 = atomics_a_mask_size_1 & atomics_a_mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_12 = atomics_a_mask_sub_2_1_1 | _atomics_a_mask_acc_T_12; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_13 = atomics_a_mask_sub_2_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_13 = atomics_a_mask_size_1 & atomics_a_mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_13 = atomics_a_mask_sub_2_1_1 | _atomics_a_mask_acc_T_13; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_14 = atomics_a_mask_sub_3_2_1 & atomics_a_mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_14 = atomics_a_mask_size_1 & atomics_a_mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_14 = atomics_a_mask_sub_3_1_1 | _atomics_a_mask_acc_T_14; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_15 = atomics_a_mask_sub_3_2_1 & atomics_a_mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_15 = atomics_a_mask_size_1 & atomics_a_mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_15 = atomics_a_mask_sub_3_1_1 | _atomics_a_mask_acc_T_15; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_1 = {atomics_a_mask_acc_9, atomics_a_mask_acc_8}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_1 = {atomics_a_mask_acc_11, atomics_a_mask_acc_10}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_1 = {atomics_a_mask_lo_hi_1, atomics_a_mask_lo_lo_1}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_1 = {atomics_a_mask_acc_13, atomics_a_mask_acc_12}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_1 = {atomics_a_mask_acc_15, atomics_a_mask_acc_14}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_1 = {atomics_a_mask_hi_hi_1, atomics_a_mask_hi_lo_1}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_1 = {atomics_a_mask_hi_1, atomics_a_mask_lo_1}; // @[Misc.scala:222:10]
assign atomics_a_1_mask = _atomics_a_mask_T_1; // @[Misc.scala:222:10]
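  // AMO variant 2 (atomics_legal_2, atomics_a_2_mask): same replicated structure.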
wire [40:0] _atomics_legal_T_125 = {1'h0, _atomics_legal_T_124}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_126 = _atomics_legal_T_125 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_127 = _atomics_legal_T_126; // @[Parameters.scala:137:46]
wire _atomics_legal_T_128 = _atomics_legal_T_127 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_130 = {1'h0, _atomics_legal_T_129}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_131 = _atomics_legal_T_130 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_132 = _atomics_legal_T_131; // @[Parameters.scala:137:46]
wire _atomics_legal_T_133 = _atomics_legal_T_132 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_135 = {1'h0, _atomics_legal_T_134}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_136 = _atomics_legal_T_135 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_137 = _atomics_legal_T_136; // @[Parameters.scala:137:46]
wire _atomics_legal_T_138 = _atomics_legal_T_137 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_140 = {1'h0, _atomics_legal_T_139}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_141 = _atomics_legal_T_140 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_142 = _atomics_legal_T_141; // @[Parameters.scala:137:46]
wire _atomics_legal_T_143 = _atomics_legal_T_142 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_145 = {1'h0, _atomics_legal_T_144}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_146 = _atomics_legal_T_145 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_147 = _atomics_legal_T_146; // @[Parameters.scala:137:46]
wire _atomics_legal_T_148 = _atomics_legal_T_147 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_150 = {1'h0, _atomics_legal_T_149}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_151 = _atomics_legal_T_150 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_152 = _atomics_legal_T_151; // @[Parameters.scala:137:46]
wire _atomics_legal_T_153 = _atomics_legal_T_152 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_155 = {1'h0, _atomics_legal_T_154}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_156 = _atomics_legal_T_155 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_157 = _atomics_legal_T_156; // @[Parameters.scala:137:46]
wire _atomics_legal_T_158 = _atomics_legal_T_157 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_160 = {1'h0, _atomics_legal_T_159}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_161 = _atomics_legal_T_160 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_162 = _atomics_legal_T_161; // @[Parameters.scala:137:46]
wire _atomics_legal_T_163 = _atomics_legal_T_162 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_164 = _atomics_legal_T_128 | _atomics_legal_T_133; // @[Parameters.scala:685:42]
wire _atomics_legal_T_165 = _atomics_legal_T_164 | _atomics_legal_T_138; // @[Parameters.scala:685:42]
wire _atomics_legal_T_166 = _atomics_legal_T_165 | _atomics_legal_T_143; // @[Parameters.scala:685:42]
wire _atomics_legal_T_167 = _atomics_legal_T_166 | _atomics_legal_T_148; // @[Parameters.scala:685:42]
wire _atomics_legal_T_168 = _atomics_legal_T_167 | _atomics_legal_T_153; // @[Parameters.scala:685:42]
wire _atomics_legal_T_169 = _atomics_legal_T_168 | _atomics_legal_T_158; // @[Parameters.scala:685:42]
wire _atomics_legal_T_170 = _atomics_legal_T_169 | _atomics_legal_T_163; // @[Parameters.scala:685:42]
wire _atomics_legal_T_171 = _atomics_legal_T_170; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_179 = _atomics_legal_T_171; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_174 = {1'h0, _atomics_legal_T_173}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_175 = _atomics_legal_T_174 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_176 = _atomics_legal_T_175; // @[Parameters.scala:137:46]
wire _atomics_legal_T_177 = _atomics_legal_T_176 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_2 = _atomics_legal_T_179; // @[Parameters.scala:686:26]
wire [7:0] _atomics_a_mask_T_2; // @[Misc.scala:222:10]
wire [7:0] atomics_a_2_mask; // @[Edges.scala:534:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_2 = _atomics_a_mask_sizeOH_T_6[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_7 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_2; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_8 = _atomics_a_mask_sizeOH_T_7[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_2 = {_atomics_a_mask_sizeOH_T_8[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_2 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_2 = atomics_a_mask_sizeOH_2[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_2 = atomics_a_mask_sub_sub_bit_2; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_2 = ~atomics_a_mask_sub_sub_bit_2; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_2 = atomics_a_mask_sub_sub_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_4 = atomics_a_mask_sub_sub_size_2 & atomics_a_mask_sub_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_2 = atomics_a_mask_sub_sub_sub_0_1_2 | _atomics_a_mask_sub_sub_acc_T_4; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_5 = atomics_a_mask_sub_sub_size_2 & atomics_a_mask_sub_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_2 = atomics_a_mask_sub_sub_sub_0_1_2 | _atomics_a_mask_sub_sub_acc_T_5; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_2 = atomics_a_mask_sizeOH_2[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_2 = ~atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_2 = atomics_a_mask_sub_sub_0_2_2 & atomics_a_mask_sub_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_8 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_2 = atomics_a_mask_sub_sub_0_1_2 | _atomics_a_mask_sub_acc_T_8; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_2 = atomics_a_mask_sub_sub_0_2_2 & atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_9 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_2 = atomics_a_mask_sub_sub_0_1_2 | _atomics_a_mask_sub_acc_T_9; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_2 = atomics_a_mask_sub_sub_1_2_2 & atomics_a_mask_sub_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_10 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_2_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_2 = atomics_a_mask_sub_sub_1_1_2 | _atomics_a_mask_sub_acc_T_10; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_2 = atomics_a_mask_sub_sub_1_2_2 & atomics_a_mask_sub_bit_2; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_11 = atomics_a_mask_sub_size_2 & atomics_a_mask_sub_3_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_2 = atomics_a_mask_sub_sub_1_1_2 | _atomics_a_mask_sub_acc_T_11; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_2 = atomics_a_mask_sizeOH_2[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_2 = ~atomics_a_mask_bit_2; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_16 = atomics_a_mask_sub_0_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_16 = atomics_a_mask_size_2 & atomics_a_mask_eq_16; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_16 = atomics_a_mask_sub_0_1_2 | _atomics_a_mask_acc_T_16; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_17 = atomics_a_mask_sub_0_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_17 = atomics_a_mask_size_2 & atomics_a_mask_eq_17; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_17 = atomics_a_mask_sub_0_1_2 | _atomics_a_mask_acc_T_17; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_18 = atomics_a_mask_sub_1_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_18 = atomics_a_mask_size_2 & atomics_a_mask_eq_18; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_18 = atomics_a_mask_sub_1_1_2 | _atomics_a_mask_acc_T_18; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_19 = atomics_a_mask_sub_1_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_19 = atomics_a_mask_size_2 & atomics_a_mask_eq_19; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_19 = atomics_a_mask_sub_1_1_2 | _atomics_a_mask_acc_T_19; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_20 = atomics_a_mask_sub_2_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_20 = atomics_a_mask_size_2 & atomics_a_mask_eq_20; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_20 = atomics_a_mask_sub_2_1_2 | _atomics_a_mask_acc_T_20; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_21 = atomics_a_mask_sub_2_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_21 = atomics_a_mask_size_2 & atomics_a_mask_eq_21; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_21 = atomics_a_mask_sub_2_1_2 | _atomics_a_mask_acc_T_21; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_22 = atomics_a_mask_sub_3_2_2 & atomics_a_mask_nbit_2; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_22 = atomics_a_mask_size_2 & atomics_a_mask_eq_22; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_22 = atomics_a_mask_sub_3_1_2 | _atomics_a_mask_acc_T_22; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_23 = atomics_a_mask_sub_3_2_2 & atomics_a_mask_bit_2; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_23 = atomics_a_mask_size_2 & atomics_a_mask_eq_23; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_23 = atomics_a_mask_sub_3_1_2 | _atomics_a_mask_acc_T_23; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_2 = {atomics_a_mask_acc_17, atomics_a_mask_acc_16}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_2 = {atomics_a_mask_acc_19, atomics_a_mask_acc_18}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_2 = {atomics_a_mask_lo_hi_2, atomics_a_mask_lo_lo_2}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_2 = {atomics_a_mask_acc_21, atomics_a_mask_acc_20}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_2 = {atomics_a_mask_acc_23, atomics_a_mask_acc_22}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_2 = {atomics_a_mask_hi_hi_2, atomics_a_mask_hi_lo_2}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_2 = {atomics_a_mask_hi_2, atomics_a_mask_lo_2}; // @[Misc.scala:222:10]
assign atomics_a_2_mask = _atomics_a_mask_T_2; // @[Misc.scala:222:10]
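  // AMO variant 3 (atomics_legal_3, atomics_a_3_mask): same replicated structure.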
wire [40:0] _atomics_legal_T_185 = {1'h0, _atomics_legal_T_184}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_186 = _atomics_legal_T_185 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_187 = _atomics_legal_T_186; // @[Parameters.scala:137:46]
wire _atomics_legal_T_188 = _atomics_legal_T_187 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_190 = {1'h0, _atomics_legal_T_189}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_191 = _atomics_legal_T_190 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_192 = _atomics_legal_T_191; // @[Parameters.scala:137:46]
wire _atomics_legal_T_193 = _atomics_legal_T_192 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_195 = {1'h0, _atomics_legal_T_194}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_196 = _atomics_legal_T_195 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_197 = _atomics_legal_T_196; // @[Parameters.scala:137:46]
wire _atomics_legal_T_198 = _atomics_legal_T_197 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_200 = {1'h0, _atomics_legal_T_199}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_201 = _atomics_legal_T_200 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_202 = _atomics_legal_T_201; // @[Parameters.scala:137:46]
wire _atomics_legal_T_203 = _atomics_legal_T_202 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_205 = {1'h0, _atomics_legal_T_204}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_206 = _atomics_legal_T_205 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_207 = _atomics_legal_T_206; // @[Parameters.scala:137:46]
wire _atomics_legal_T_208 = _atomics_legal_T_207 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_210 = {1'h0, _atomics_legal_T_209}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_211 = _atomics_legal_T_210 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_212 = _atomics_legal_T_211; // @[Parameters.scala:137:46]
wire _atomics_legal_T_213 = _atomics_legal_T_212 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_215 = {1'h0, _atomics_legal_T_214}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_216 = _atomics_legal_T_215 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_217 = _atomics_legal_T_216; // @[Parameters.scala:137:46]
wire _atomics_legal_T_218 = _atomics_legal_T_217 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_220 = {1'h0, _atomics_legal_T_219}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_221 = _atomics_legal_T_220 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_222 = _atomics_legal_T_221; // @[Parameters.scala:137:46]
wire _atomics_legal_T_223 = _atomics_legal_T_222 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_224 = _atomics_legal_T_188 | _atomics_legal_T_193; // @[Parameters.scala:685:42]
wire _atomics_legal_T_225 = _atomics_legal_T_224 | _atomics_legal_T_198; // @[Parameters.scala:685:42]
wire _atomics_legal_T_226 = _atomics_legal_T_225 | _atomics_legal_T_203; // @[Parameters.scala:685:42]
wire _atomics_legal_T_227 = _atomics_legal_T_226 | _atomics_legal_T_208; // @[Parameters.scala:685:42]
wire _atomics_legal_T_228 = _atomics_legal_T_227 | _atomics_legal_T_213; // @[Parameters.scala:685:42]
wire _atomics_legal_T_229 = _atomics_legal_T_228 | _atomics_legal_T_218; // @[Parameters.scala:685:42]
wire _atomics_legal_T_230 = _atomics_legal_T_229 | _atomics_legal_T_223; // @[Parameters.scala:685:42]
wire _atomics_legal_T_231 = _atomics_legal_T_230; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_239 = _atomics_legal_T_231; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_234 = {1'h0, _atomics_legal_T_233}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_235 = _atomics_legal_T_234 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_236 = _atomics_legal_T_235; // @[Parameters.scala:137:46]
wire _atomics_legal_T_237 = _atomics_legal_T_236 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_3 = _atomics_legal_T_239; // @[Parameters.scala:686:26]
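// atomics_a_3_mask: byte-lane mask for the third atomic request candidate.
// s2_req_size is decoded to a one-hot (atomics_a_mask_sizeOH_3) and combined with the
// low address bits to enable the bytes covered by the access within the 8-byte beat.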
wire [7:0] _atomics_a_mask_T_3; // @[Misc.scala:222:10]
wire [7:0] atomics_a_3_mask; // @[Edges.scala:534:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_3 = _atomics_a_mask_sizeOH_T_9[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_10 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_3; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_11 = _atomics_a_mask_sizeOH_T_10[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_3 = {_atomics_a_mask_sizeOH_T_11[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_3 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_3 = atomics_a_mask_sizeOH_3[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_3 = atomics_a_mask_sub_sub_bit_3; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_3 = ~atomics_a_mask_sub_sub_bit_3; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_3 = atomics_a_mask_sub_sub_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_6 = atomics_a_mask_sub_sub_size_3 & atomics_a_mask_sub_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_3 = atomics_a_mask_sub_sub_sub_0_1_3 | _atomics_a_mask_sub_sub_acc_T_6; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_7 = atomics_a_mask_sub_sub_size_3 & atomics_a_mask_sub_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_3 = atomics_a_mask_sub_sub_sub_0_1_3 | _atomics_a_mask_sub_sub_acc_T_7; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_3 = atomics_a_mask_sizeOH_3[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_3 = ~atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_3 = atomics_a_mask_sub_sub_0_2_3 & atomics_a_mask_sub_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_12 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_3 = atomics_a_mask_sub_sub_0_1_3 | _atomics_a_mask_sub_acc_T_12; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_3 = atomics_a_mask_sub_sub_0_2_3 & atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_13 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_3 = atomics_a_mask_sub_sub_0_1_3 | _atomics_a_mask_sub_acc_T_13; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_3 = atomics_a_mask_sub_sub_1_2_3 & atomics_a_mask_sub_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_14 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_2_2_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_3 = atomics_a_mask_sub_sub_1_1_3 | _atomics_a_mask_sub_acc_T_14; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_3 = atomics_a_mask_sub_sub_1_2_3 & atomics_a_mask_sub_bit_3; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_15 = atomics_a_mask_sub_size_3 & atomics_a_mask_sub_3_2_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_3 = atomics_a_mask_sub_sub_1_1_3 | _atomics_a_mask_sub_acc_T_15; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_3 = atomics_a_mask_sizeOH_3[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_3 = ~atomics_a_mask_bit_3; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_24 = atomics_a_mask_sub_0_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_24 = atomics_a_mask_size_3 & atomics_a_mask_eq_24; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_24 = atomics_a_mask_sub_0_1_3 | _atomics_a_mask_acc_T_24; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_25 = atomics_a_mask_sub_0_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_25 = atomics_a_mask_size_3 & atomics_a_mask_eq_25; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_25 = atomics_a_mask_sub_0_1_3 | _atomics_a_mask_acc_T_25; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_26 = atomics_a_mask_sub_1_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_26 = atomics_a_mask_size_3 & atomics_a_mask_eq_26; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_26 = atomics_a_mask_sub_1_1_3 | _atomics_a_mask_acc_T_26; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_27 = atomics_a_mask_sub_1_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_27 = atomics_a_mask_size_3 & atomics_a_mask_eq_27; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_27 = atomics_a_mask_sub_1_1_3 | _atomics_a_mask_acc_T_27; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_28 = atomics_a_mask_sub_2_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_28 = atomics_a_mask_size_3 & atomics_a_mask_eq_28; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_28 = atomics_a_mask_sub_2_1_3 | _atomics_a_mask_acc_T_28; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_29 = atomics_a_mask_sub_2_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_29 = atomics_a_mask_size_3 & atomics_a_mask_eq_29; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_29 = atomics_a_mask_sub_2_1_3 | _atomics_a_mask_acc_T_29; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_30 = atomics_a_mask_sub_3_2_3 & atomics_a_mask_nbit_3; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_30 = atomics_a_mask_size_3 & atomics_a_mask_eq_30; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_30 = atomics_a_mask_sub_3_1_3 | _atomics_a_mask_acc_T_30; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_31 = atomics_a_mask_sub_3_2_3 & atomics_a_mask_bit_3; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_31 = atomics_a_mask_size_3 & atomics_a_mask_eq_31; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_31 = atomics_a_mask_sub_3_1_3 | _atomics_a_mask_acc_T_31; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_3 = {atomics_a_mask_acc_25, atomics_a_mask_acc_24}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_3 = {atomics_a_mask_acc_27, atomics_a_mask_acc_26}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_3 = {atomics_a_mask_lo_hi_3, atomics_a_mask_lo_lo_3}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_3 = {atomics_a_mask_acc_29, atomics_a_mask_acc_28}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_3 = {atomics_a_mask_acc_31, atomics_a_mask_acc_30}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_3 = {atomics_a_mask_hi_hi_3, atomics_a_mask_hi_lo_3}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_3 = {atomics_a_mask_hi_3, atomics_a_mask_lo_3}; // @[Misc.scala:222:10]
assign atomics_a_3_mask = _atomics_a_mask_T_3; // @[Misc.scala:222:10]
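// atomics_legal_4: address-legality check (same structure as atomics_legal_3 above).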
wire [40:0] _atomics_legal_T_245 = {1'h0, _atomics_legal_T_244}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_246 = _atomics_legal_T_245 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_247 = _atomics_legal_T_246; // @[Parameters.scala:137:46]
wire _atomics_legal_T_248 = _atomics_legal_T_247 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_250 = {1'h0, _atomics_legal_T_249}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_251 = _atomics_legal_T_250 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_252 = _atomics_legal_T_251; // @[Parameters.scala:137:46]
wire _atomics_legal_T_253 = _atomics_legal_T_252 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_255 = {1'h0, _atomics_legal_T_254}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_256 = _atomics_legal_T_255 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_257 = _atomics_legal_T_256; // @[Parameters.scala:137:46]
wire _atomics_legal_T_258 = _atomics_legal_T_257 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_260 = {1'h0, _atomics_legal_T_259}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_261 = _atomics_legal_T_260 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_262 = _atomics_legal_T_261; // @[Parameters.scala:137:46]
wire _atomics_legal_T_263 = _atomics_legal_T_262 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_265 = {1'h0, _atomics_legal_T_264}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_266 = _atomics_legal_T_265 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_267 = _atomics_legal_T_266; // @[Parameters.scala:137:46]
wire _atomics_legal_T_268 = _atomics_legal_T_267 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_270 = {1'h0, _atomics_legal_T_269}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_271 = _atomics_legal_T_270 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_272 = _atomics_legal_T_271; // @[Parameters.scala:137:46]
wire _atomics_legal_T_273 = _atomics_legal_T_272 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_275 = {1'h0, _atomics_legal_T_274}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_276 = _atomics_legal_T_275 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_277 = _atomics_legal_T_276; // @[Parameters.scala:137:46]
wire _atomics_legal_T_278 = _atomics_legal_T_277 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_280 = {1'h0, _atomics_legal_T_279}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_281 = _atomics_legal_T_280 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_282 = _atomics_legal_T_281; // @[Parameters.scala:137:46]
wire _atomics_legal_T_283 = _atomics_legal_T_282 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_284 = _atomics_legal_T_248 | _atomics_legal_T_253; // @[Parameters.scala:685:42]
wire _atomics_legal_T_285 = _atomics_legal_T_284 | _atomics_legal_T_258; // @[Parameters.scala:685:42]
wire _atomics_legal_T_286 = _atomics_legal_T_285 | _atomics_legal_T_263; // @[Parameters.scala:685:42]
wire _atomics_legal_T_287 = _atomics_legal_T_286 | _atomics_legal_T_268; // @[Parameters.scala:685:42]
wire _atomics_legal_T_288 = _atomics_legal_T_287 | _atomics_legal_T_273; // @[Parameters.scala:685:42]
wire _atomics_legal_T_289 = _atomics_legal_T_288 | _atomics_legal_T_278; // @[Parameters.scala:685:42]
wire _atomics_legal_T_290 = _atomics_legal_T_289 | _atomics_legal_T_283; // @[Parameters.scala:685:42]
wire _atomics_legal_T_291 = _atomics_legal_T_290; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_299 = _atomics_legal_T_291; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_294 = {1'h0, _atomics_legal_T_293}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_295 = _atomics_legal_T_294 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_296 = _atomics_legal_T_295; // @[Parameters.scala:137:46]
wire _atomics_legal_T_297 = _atomics_legal_T_296 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_4 = _atomics_legal_T_299; // @[Parameters.scala:686:26]
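// atomics_a_4_mask: byte-lane mask generation (same structure as atomics_a_3_mask above).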
wire [7:0] _atomics_a_mask_T_4; // @[Misc.scala:222:10]
wire [7:0] atomics_a_4_mask; // @[Edges.scala:517:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_4 = _atomics_a_mask_sizeOH_T_12[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_13 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_4; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_14 = _atomics_a_mask_sizeOH_T_13[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_4 = {_atomics_a_mask_sizeOH_T_14[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_4 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_4 = atomics_a_mask_sizeOH_4[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_4 = atomics_a_mask_sub_sub_bit_4; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_4 = ~atomics_a_mask_sub_sub_bit_4; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_4 = atomics_a_mask_sub_sub_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_8 = atomics_a_mask_sub_sub_size_4 & atomics_a_mask_sub_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_4 = atomics_a_mask_sub_sub_sub_0_1_4 | _atomics_a_mask_sub_sub_acc_T_8; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_9 = atomics_a_mask_sub_sub_size_4 & atomics_a_mask_sub_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_4 = atomics_a_mask_sub_sub_sub_0_1_4 | _atomics_a_mask_sub_sub_acc_T_9; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_4 = atomics_a_mask_sizeOH_4[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_4 = ~atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_4 = atomics_a_mask_sub_sub_0_2_4 & atomics_a_mask_sub_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_16 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_4 = atomics_a_mask_sub_sub_0_1_4 | _atomics_a_mask_sub_acc_T_16; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_4 = atomics_a_mask_sub_sub_0_2_4 & atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_17 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_4 = atomics_a_mask_sub_sub_0_1_4 | _atomics_a_mask_sub_acc_T_17; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_4 = atomics_a_mask_sub_sub_1_2_4 & atomics_a_mask_sub_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_18 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_2_2_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_4 = atomics_a_mask_sub_sub_1_1_4 | _atomics_a_mask_sub_acc_T_18; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_4 = atomics_a_mask_sub_sub_1_2_4 & atomics_a_mask_sub_bit_4; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_19 = atomics_a_mask_sub_size_4 & atomics_a_mask_sub_3_2_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_4 = atomics_a_mask_sub_sub_1_1_4 | _atomics_a_mask_sub_acc_T_19; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_4 = atomics_a_mask_sizeOH_4[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_4 = ~atomics_a_mask_bit_4; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_32 = atomics_a_mask_sub_0_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_32 = atomics_a_mask_size_4 & atomics_a_mask_eq_32; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_32 = atomics_a_mask_sub_0_1_4 | _atomics_a_mask_acc_T_32; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_33 = atomics_a_mask_sub_0_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_33 = atomics_a_mask_size_4 & atomics_a_mask_eq_33; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_33 = atomics_a_mask_sub_0_1_4 | _atomics_a_mask_acc_T_33; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_34 = atomics_a_mask_sub_1_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_34 = atomics_a_mask_size_4 & atomics_a_mask_eq_34; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_34 = atomics_a_mask_sub_1_1_4 | _atomics_a_mask_acc_T_34; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_35 = atomics_a_mask_sub_1_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_35 = atomics_a_mask_size_4 & atomics_a_mask_eq_35; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_35 = atomics_a_mask_sub_1_1_4 | _atomics_a_mask_acc_T_35; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_36 = atomics_a_mask_sub_2_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_36 = atomics_a_mask_size_4 & atomics_a_mask_eq_36; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_36 = atomics_a_mask_sub_2_1_4 | _atomics_a_mask_acc_T_36; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_37 = atomics_a_mask_sub_2_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_37 = atomics_a_mask_size_4 & atomics_a_mask_eq_37; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_37 = atomics_a_mask_sub_2_1_4 | _atomics_a_mask_acc_T_37; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_38 = atomics_a_mask_sub_3_2_4 & atomics_a_mask_nbit_4; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_38 = atomics_a_mask_size_4 & atomics_a_mask_eq_38; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_38 = atomics_a_mask_sub_3_1_4 | _atomics_a_mask_acc_T_38; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_39 = atomics_a_mask_sub_3_2_4 & atomics_a_mask_bit_4; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_39 = atomics_a_mask_size_4 & atomics_a_mask_eq_39; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_39 = atomics_a_mask_sub_3_1_4 | _atomics_a_mask_acc_T_39; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_4 = {atomics_a_mask_acc_33, atomics_a_mask_acc_32}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_4 = {atomics_a_mask_acc_35, atomics_a_mask_acc_34}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_4 = {atomics_a_mask_lo_hi_4, atomics_a_mask_lo_lo_4}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_4 = {atomics_a_mask_acc_37, atomics_a_mask_acc_36}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_4 = {atomics_a_mask_acc_39, atomics_a_mask_acc_38}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_4 = {atomics_a_mask_hi_hi_4, atomics_a_mask_hi_lo_4}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_4 = {atomics_a_mask_hi_4, atomics_a_mask_lo_4}; // @[Misc.scala:222:10]
assign atomics_a_4_mask = _atomics_a_mask_T_4; // @[Misc.scala:222:10]
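// atomics_legal_5: address-legality check (same structure as atomics_legal_3 above).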
wire [40:0] _atomics_legal_T_305 = {1'h0, _atomics_legal_T_304}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_306 = _atomics_legal_T_305 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_307 = _atomics_legal_T_306; // @[Parameters.scala:137:46]
wire _atomics_legal_T_308 = _atomics_legal_T_307 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_310 = {1'h0, _atomics_legal_T_309}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_311 = _atomics_legal_T_310 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_312 = _atomics_legal_T_311; // @[Parameters.scala:137:46]
wire _atomics_legal_T_313 = _atomics_legal_T_312 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_315 = {1'h0, _atomics_legal_T_314}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_316 = _atomics_legal_T_315 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_317 = _atomics_legal_T_316; // @[Parameters.scala:137:46]
wire _atomics_legal_T_318 = _atomics_legal_T_317 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_320 = {1'h0, _atomics_legal_T_319}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_321 = _atomics_legal_T_320 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_322 = _atomics_legal_T_321; // @[Parameters.scala:137:46]
wire _atomics_legal_T_323 = _atomics_legal_T_322 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_325 = {1'h0, _atomics_legal_T_324}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_326 = _atomics_legal_T_325 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_327 = _atomics_legal_T_326; // @[Parameters.scala:137:46]
wire _atomics_legal_T_328 = _atomics_legal_T_327 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_330 = {1'h0, _atomics_legal_T_329}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_331 = _atomics_legal_T_330 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_332 = _atomics_legal_T_331; // @[Parameters.scala:137:46]
wire _atomics_legal_T_333 = _atomics_legal_T_332 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_335 = {1'h0, _atomics_legal_T_334}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_336 = _atomics_legal_T_335 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_337 = _atomics_legal_T_336; // @[Parameters.scala:137:46]
wire _atomics_legal_T_338 = _atomics_legal_T_337 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_340 = {1'h0, _atomics_legal_T_339}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_341 = _atomics_legal_T_340 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_342 = _atomics_legal_T_341; // @[Parameters.scala:137:46]
wire _atomics_legal_T_343 = _atomics_legal_T_342 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_344 = _atomics_legal_T_308 | _atomics_legal_T_313; // @[Parameters.scala:685:42]
wire _atomics_legal_T_345 = _atomics_legal_T_344 | _atomics_legal_T_318; // @[Parameters.scala:685:42]
wire _atomics_legal_T_346 = _atomics_legal_T_345 | _atomics_legal_T_323; // @[Parameters.scala:685:42]
wire _atomics_legal_T_347 = _atomics_legal_T_346 | _atomics_legal_T_328; // @[Parameters.scala:685:42]
wire _atomics_legal_T_348 = _atomics_legal_T_347 | _atomics_legal_T_333; // @[Parameters.scala:685:42]
wire _atomics_legal_T_349 = _atomics_legal_T_348 | _atomics_legal_T_338; // @[Parameters.scala:685:42]
wire _atomics_legal_T_350 = _atomics_legal_T_349 | _atomics_legal_T_343; // @[Parameters.scala:685:42]
wire _atomics_legal_T_351 = _atomics_legal_T_350; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_359 = _atomics_legal_T_351; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_354 = {1'h0, _atomics_legal_T_353}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_355 = _atomics_legal_T_354 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_356 = _atomics_legal_T_355; // @[Parameters.scala:137:46]
wire _atomics_legal_T_357 = _atomics_legal_T_356 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_5 = _atomics_legal_T_359; // @[Parameters.scala:686:26]
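// atomics_a_5_mask: byte-lane mask generation (same structure as atomics_a_3_mask above).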
wire [7:0] _atomics_a_mask_T_5; // @[Misc.scala:222:10]
wire [7:0] atomics_a_5_mask; // @[Edges.scala:517:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_5 = _atomics_a_mask_sizeOH_T_15[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_16 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_5; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_17 = _atomics_a_mask_sizeOH_T_16[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_5 = {_atomics_a_mask_sizeOH_T_17[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_5 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_5 = atomics_a_mask_sizeOH_5[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_5 = atomics_a_mask_sub_sub_bit_5; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_5 = ~atomics_a_mask_sub_sub_bit_5; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_5 = atomics_a_mask_sub_sub_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_10 = atomics_a_mask_sub_sub_size_5 & atomics_a_mask_sub_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_5 = atomics_a_mask_sub_sub_sub_0_1_5 | _atomics_a_mask_sub_sub_acc_T_10; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_11 = atomics_a_mask_sub_sub_size_5 & atomics_a_mask_sub_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_5 = atomics_a_mask_sub_sub_sub_0_1_5 | _atomics_a_mask_sub_sub_acc_T_11; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_5 = atomics_a_mask_sizeOH_5[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_5 = ~atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_5 = atomics_a_mask_sub_sub_0_2_5 & atomics_a_mask_sub_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_20 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_5 = atomics_a_mask_sub_sub_0_1_5 | _atomics_a_mask_sub_acc_T_20; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_5 = atomics_a_mask_sub_sub_0_2_5 & atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_21 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_5 = atomics_a_mask_sub_sub_0_1_5 | _atomics_a_mask_sub_acc_T_21; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_5 = atomics_a_mask_sub_sub_1_2_5 & atomics_a_mask_sub_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_22 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_2_2_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_5 = atomics_a_mask_sub_sub_1_1_5 | _atomics_a_mask_sub_acc_T_22; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_5 = atomics_a_mask_sub_sub_1_2_5 & atomics_a_mask_sub_bit_5; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_23 = atomics_a_mask_sub_size_5 & atomics_a_mask_sub_3_2_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_5 = atomics_a_mask_sub_sub_1_1_5 | _atomics_a_mask_sub_acc_T_23; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_5 = atomics_a_mask_sizeOH_5[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_5 = ~atomics_a_mask_bit_5; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_40 = atomics_a_mask_sub_0_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_40 = atomics_a_mask_size_5 & atomics_a_mask_eq_40; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_40 = atomics_a_mask_sub_0_1_5 | _atomics_a_mask_acc_T_40; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_41 = atomics_a_mask_sub_0_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_41 = atomics_a_mask_size_5 & atomics_a_mask_eq_41; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_41 = atomics_a_mask_sub_0_1_5 | _atomics_a_mask_acc_T_41; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_42 = atomics_a_mask_sub_1_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_42 = atomics_a_mask_size_5 & atomics_a_mask_eq_42; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_42 = atomics_a_mask_sub_1_1_5 | _atomics_a_mask_acc_T_42; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_43 = atomics_a_mask_sub_1_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_43 = atomics_a_mask_size_5 & atomics_a_mask_eq_43; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_43 = atomics_a_mask_sub_1_1_5 | _atomics_a_mask_acc_T_43; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_44 = atomics_a_mask_sub_2_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_44 = atomics_a_mask_size_5 & atomics_a_mask_eq_44; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_44 = atomics_a_mask_sub_2_1_5 | _atomics_a_mask_acc_T_44; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_45 = atomics_a_mask_sub_2_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_45 = atomics_a_mask_size_5 & atomics_a_mask_eq_45; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_45 = atomics_a_mask_sub_2_1_5 | _atomics_a_mask_acc_T_45; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_46 = atomics_a_mask_sub_3_2_5 & atomics_a_mask_nbit_5; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_46 = atomics_a_mask_size_5 & atomics_a_mask_eq_46; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_46 = atomics_a_mask_sub_3_1_5 | _atomics_a_mask_acc_T_46; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_47 = atomics_a_mask_sub_3_2_5 & atomics_a_mask_bit_5; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_47 = atomics_a_mask_size_5 & atomics_a_mask_eq_47; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_47 = atomics_a_mask_sub_3_1_5 | _atomics_a_mask_acc_T_47; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_5 = {atomics_a_mask_acc_41, atomics_a_mask_acc_40}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_5 = {atomics_a_mask_acc_43, atomics_a_mask_acc_42}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_5 = {atomics_a_mask_lo_hi_5, atomics_a_mask_lo_lo_5}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_5 = {atomics_a_mask_acc_45, atomics_a_mask_acc_44}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_5 = {atomics_a_mask_acc_47, atomics_a_mask_acc_46}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_5 = {atomics_a_mask_hi_hi_5, atomics_a_mask_hi_lo_5}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_5 = {atomics_a_mask_hi_5, atomics_a_mask_lo_5}; // @[Misc.scala:222:10]
assign atomics_a_5_mask = _atomics_a_mask_T_5; // @[Misc.scala:222:10]
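// atomics_legal_6: address-legality check (same structure as atomics_legal_3 above).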
wire [40:0] _atomics_legal_T_365 = {1'h0, _atomics_legal_T_364}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_366 = _atomics_legal_T_365 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_367 = _atomics_legal_T_366; // @[Parameters.scala:137:46]
wire _atomics_legal_T_368 = _atomics_legal_T_367 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_370 = {1'h0, _atomics_legal_T_369}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_371 = _atomics_legal_T_370 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_372 = _atomics_legal_T_371; // @[Parameters.scala:137:46]
wire _atomics_legal_T_373 = _atomics_legal_T_372 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_375 = {1'h0, _atomics_legal_T_374}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_376 = _atomics_legal_T_375 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_377 = _atomics_legal_T_376; // @[Parameters.scala:137:46]
wire _atomics_legal_T_378 = _atomics_legal_T_377 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_380 = {1'h0, _atomics_legal_T_379}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_381 = _atomics_legal_T_380 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_382 = _atomics_legal_T_381; // @[Parameters.scala:137:46]
wire _atomics_legal_T_383 = _atomics_legal_T_382 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_385 = {1'h0, _atomics_legal_T_384}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_386 = _atomics_legal_T_385 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_387 = _atomics_legal_T_386; // @[Parameters.scala:137:46]
wire _atomics_legal_T_388 = _atomics_legal_T_387 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_390 = {1'h0, _atomics_legal_T_389}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_391 = _atomics_legal_T_390 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_392 = _atomics_legal_T_391; // @[Parameters.scala:137:46]
wire _atomics_legal_T_393 = _atomics_legal_T_392 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_395 = {1'h0, _atomics_legal_T_394}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_396 = _atomics_legal_T_395 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_397 = _atomics_legal_T_396; // @[Parameters.scala:137:46]
wire _atomics_legal_T_398 = _atomics_legal_T_397 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_400 = {1'h0, _atomics_legal_T_399}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_401 = _atomics_legal_T_400 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_402 = _atomics_legal_T_401; // @[Parameters.scala:137:46]
wire _atomics_legal_T_403 = _atomics_legal_T_402 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_404 = _atomics_legal_T_368 | _atomics_legal_T_373; // @[Parameters.scala:685:42]
wire _atomics_legal_T_405 = _atomics_legal_T_404 | _atomics_legal_T_378; // @[Parameters.scala:685:42]
wire _atomics_legal_T_406 = _atomics_legal_T_405 | _atomics_legal_T_383; // @[Parameters.scala:685:42]
wire _atomics_legal_T_407 = _atomics_legal_T_406 | _atomics_legal_T_388; // @[Parameters.scala:685:42]
wire _atomics_legal_T_408 = _atomics_legal_T_407 | _atomics_legal_T_393; // @[Parameters.scala:685:42]
wire _atomics_legal_T_409 = _atomics_legal_T_408 | _atomics_legal_T_398; // @[Parameters.scala:685:42]
wire _atomics_legal_T_410 = _atomics_legal_T_409 | _atomics_legal_T_403; // @[Parameters.scala:685:42]
wire _atomics_legal_T_411 = _atomics_legal_T_410; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_419 = _atomics_legal_T_411; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_414 = {1'h0, _atomics_legal_T_413}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_415 = _atomics_legal_T_414 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_416 = _atomics_legal_T_415; // @[Parameters.scala:137:46]
wire _atomics_legal_T_417 = _atomics_legal_T_416 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_6 = _atomics_legal_T_419; // @[Parameters.scala:686:26]
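// atomics_a_6_mask: byte-lane mask generation (same structure as atomics_a_3_mask above).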
wire [7:0] _atomics_a_mask_T_6; // @[Misc.scala:222:10]
wire [7:0] atomics_a_6_mask; // @[Edges.scala:517:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_6 = _atomics_a_mask_sizeOH_T_18[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_19 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_6; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_20 = _atomics_a_mask_sizeOH_T_19[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_6 = {_atomics_a_mask_sizeOH_T_20[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_6 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_6 = atomics_a_mask_sizeOH_6[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_6 = atomics_a_mask_sub_sub_bit_6; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_6 = ~atomics_a_mask_sub_sub_bit_6; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_6 = atomics_a_mask_sub_sub_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_12 = atomics_a_mask_sub_sub_size_6 & atomics_a_mask_sub_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_6 = atomics_a_mask_sub_sub_sub_0_1_6 | _atomics_a_mask_sub_sub_acc_T_12; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_13 = atomics_a_mask_sub_sub_size_6 & atomics_a_mask_sub_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_6 = atomics_a_mask_sub_sub_sub_0_1_6 | _atomics_a_mask_sub_sub_acc_T_13; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_6 = atomics_a_mask_sizeOH_6[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_6 = ~atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_6 = atomics_a_mask_sub_sub_0_2_6 & atomics_a_mask_sub_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_24 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_6 = atomics_a_mask_sub_sub_0_1_6 | _atomics_a_mask_sub_acc_T_24; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_6 = atomics_a_mask_sub_sub_0_2_6 & atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_25 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_6 = atomics_a_mask_sub_sub_0_1_6 | _atomics_a_mask_sub_acc_T_25; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_6 = atomics_a_mask_sub_sub_1_2_6 & atomics_a_mask_sub_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_26 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_2_2_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_6 = atomics_a_mask_sub_sub_1_1_6 | _atomics_a_mask_sub_acc_T_26; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_6 = atomics_a_mask_sub_sub_1_2_6 & atomics_a_mask_sub_bit_6; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_27 = atomics_a_mask_sub_size_6 & atomics_a_mask_sub_3_2_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_6 = atomics_a_mask_sub_sub_1_1_6 | _atomics_a_mask_sub_acc_T_27; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_6 = atomics_a_mask_sizeOH_6[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_6 = ~atomics_a_mask_bit_6; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_48 = atomics_a_mask_sub_0_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_48 = atomics_a_mask_size_6 & atomics_a_mask_eq_48; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_48 = atomics_a_mask_sub_0_1_6 | _atomics_a_mask_acc_T_48; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_49 = atomics_a_mask_sub_0_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_49 = atomics_a_mask_size_6 & atomics_a_mask_eq_49; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_49 = atomics_a_mask_sub_0_1_6 | _atomics_a_mask_acc_T_49; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_50 = atomics_a_mask_sub_1_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_50 = atomics_a_mask_size_6 & atomics_a_mask_eq_50; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_50 = atomics_a_mask_sub_1_1_6 | _atomics_a_mask_acc_T_50; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_51 = atomics_a_mask_sub_1_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_51 = atomics_a_mask_size_6 & atomics_a_mask_eq_51; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_51 = atomics_a_mask_sub_1_1_6 | _atomics_a_mask_acc_T_51; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_52 = atomics_a_mask_sub_2_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_52 = atomics_a_mask_size_6 & atomics_a_mask_eq_52; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_52 = atomics_a_mask_sub_2_1_6 | _atomics_a_mask_acc_T_52; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_53 = atomics_a_mask_sub_2_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_53 = atomics_a_mask_size_6 & atomics_a_mask_eq_53; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_53 = atomics_a_mask_sub_2_1_6 | _atomics_a_mask_acc_T_53; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_54 = atomics_a_mask_sub_3_2_6 & atomics_a_mask_nbit_6; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_54 = atomics_a_mask_size_6 & atomics_a_mask_eq_54; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_54 = atomics_a_mask_sub_3_1_6 | _atomics_a_mask_acc_T_54; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_55 = atomics_a_mask_sub_3_2_6 & atomics_a_mask_bit_6; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_55 = atomics_a_mask_size_6 & atomics_a_mask_eq_55; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_55 = atomics_a_mask_sub_3_1_6 | _atomics_a_mask_acc_T_55; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_6 = {atomics_a_mask_acc_49, atomics_a_mask_acc_48}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_6 = {atomics_a_mask_acc_51, atomics_a_mask_acc_50}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_6 = {atomics_a_mask_lo_hi_6, atomics_a_mask_lo_lo_6}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_6 = {atomics_a_mask_acc_53, atomics_a_mask_acc_52}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_6 = {atomics_a_mask_acc_55, atomics_a_mask_acc_54}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_6 = {atomics_a_mask_hi_hi_6, atomics_a_mask_hi_lo_6}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_6 = {atomics_a_mask_hi_6, atomics_a_mask_lo_6}; // @[Misc.scala:222:10]
assign atomics_a_6_mask = _atomics_a_mask_T_6; // @[Misc.scala:222:10]
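// atomics_legal_7: address-legality check (same structure as atomics_legal_3 above).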
wire [40:0] _atomics_legal_T_425 = {1'h0, _atomics_legal_T_424}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_426 = _atomics_legal_T_425 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_427 = _atomics_legal_T_426; // @[Parameters.scala:137:46]
wire _atomics_legal_T_428 = _atomics_legal_T_427 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_430 = {1'h0, _atomics_legal_T_429}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_431 = _atomics_legal_T_430 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_432 = _atomics_legal_T_431; // @[Parameters.scala:137:46]
wire _atomics_legal_T_433 = _atomics_legal_T_432 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_435 = {1'h0, _atomics_legal_T_434}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_436 = _atomics_legal_T_435 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_437 = _atomics_legal_T_436; // @[Parameters.scala:137:46]
wire _atomics_legal_T_438 = _atomics_legal_T_437 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_440 = {1'h0, _atomics_legal_T_439}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_441 = _atomics_legal_T_440 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_442 = _atomics_legal_T_441; // @[Parameters.scala:137:46]
wire _atomics_legal_T_443 = _atomics_legal_T_442 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_445 = {1'h0, _atomics_legal_T_444}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_446 = _atomics_legal_T_445 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_447 = _atomics_legal_T_446; // @[Parameters.scala:137:46]
wire _atomics_legal_T_448 = _atomics_legal_T_447 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_450 = {1'h0, _atomics_legal_T_449}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_451 = _atomics_legal_T_450 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_452 = _atomics_legal_T_451; // @[Parameters.scala:137:46]
wire _atomics_legal_T_453 = _atomics_legal_T_452 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_455 = {1'h0, _atomics_legal_T_454}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_456 = _atomics_legal_T_455 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_457 = _atomics_legal_T_456; // @[Parameters.scala:137:46]
wire _atomics_legal_T_458 = _atomics_legal_T_457 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_460 = {1'h0, _atomics_legal_T_459}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_461 = _atomics_legal_T_460 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_462 = _atomics_legal_T_461; // @[Parameters.scala:137:46]
wire _atomics_legal_T_463 = _atomics_legal_T_462 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_464 = _atomics_legal_T_428 | _atomics_legal_T_433; // @[Parameters.scala:685:42]
wire _atomics_legal_T_465 = _atomics_legal_T_464 | _atomics_legal_T_438; // @[Parameters.scala:685:42]
wire _atomics_legal_T_466 = _atomics_legal_T_465 | _atomics_legal_T_443; // @[Parameters.scala:685:42]
wire _atomics_legal_T_467 = _atomics_legal_T_466 | _atomics_legal_T_448; // @[Parameters.scala:685:42]
wire _atomics_legal_T_468 = _atomics_legal_T_467 | _atomics_legal_T_453; // @[Parameters.scala:685:42]
wire _atomics_legal_T_469 = _atomics_legal_T_468 | _atomics_legal_T_458; // @[Parameters.scala:685:42]
wire _atomics_legal_T_470 = _atomics_legal_T_469 | _atomics_legal_T_463; // @[Parameters.scala:685:42]
wire _atomics_legal_T_471 = _atomics_legal_T_470; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_479 = _atomics_legal_T_471; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_474 = {1'h0, _atomics_legal_T_473}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_475 = _atomics_legal_T_474 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_476 = _atomics_legal_T_475; // @[Parameters.scala:137:46]
wire _atomics_legal_T_477 = _atomics_legal_T_476 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_7 = _atomics_legal_T_479; // @[Parameters.scala:686:26]
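// atomics_a_7_mask: byte-lane mask generation (same structure as atomics_a_3_mask above).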
wire [7:0] _atomics_a_mask_T_7; // @[Misc.scala:222:10]
wire [7:0] atomics_a_7_mask; // @[Edges.scala:517:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_7 = _atomics_a_mask_sizeOH_T_21[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_22 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_7; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_23 = _atomics_a_mask_sizeOH_T_22[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_7 = {_atomics_a_mask_sizeOH_T_23[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_7 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_7 = atomics_a_mask_sizeOH_7[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_7 = atomics_a_mask_sub_sub_bit_7; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_7 = ~atomics_a_mask_sub_sub_bit_7; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_7 = atomics_a_mask_sub_sub_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_14 = atomics_a_mask_sub_sub_size_7 & atomics_a_mask_sub_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_7 = atomics_a_mask_sub_sub_sub_0_1_7 | _atomics_a_mask_sub_sub_acc_T_14; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_15 = atomics_a_mask_sub_sub_size_7 & atomics_a_mask_sub_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_7 = atomics_a_mask_sub_sub_sub_0_1_7 | _atomics_a_mask_sub_sub_acc_T_15; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_7 = atomics_a_mask_sizeOH_7[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_7 = ~atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_7 = atomics_a_mask_sub_sub_0_2_7 & atomics_a_mask_sub_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_28 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_7 = atomics_a_mask_sub_sub_0_1_7 | _atomics_a_mask_sub_acc_T_28; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_7 = atomics_a_mask_sub_sub_0_2_7 & atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_29 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_7 = atomics_a_mask_sub_sub_0_1_7 | _atomics_a_mask_sub_acc_T_29; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_7 = atomics_a_mask_sub_sub_1_2_7 & atomics_a_mask_sub_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_30 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_2_2_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_7 = atomics_a_mask_sub_sub_1_1_7 | _atomics_a_mask_sub_acc_T_30; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_7 = atomics_a_mask_sub_sub_1_2_7 & atomics_a_mask_sub_bit_7; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_31 = atomics_a_mask_sub_size_7 & atomics_a_mask_sub_3_2_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_7 = atomics_a_mask_sub_sub_1_1_7 | _atomics_a_mask_sub_acc_T_31; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_7 = atomics_a_mask_sizeOH_7[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_7 = ~atomics_a_mask_bit_7; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_56 = atomics_a_mask_sub_0_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_56 = atomics_a_mask_size_7 & atomics_a_mask_eq_56; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_56 = atomics_a_mask_sub_0_1_7 | _atomics_a_mask_acc_T_56; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_57 = atomics_a_mask_sub_0_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_57 = atomics_a_mask_size_7 & atomics_a_mask_eq_57; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_57 = atomics_a_mask_sub_0_1_7 | _atomics_a_mask_acc_T_57; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_58 = atomics_a_mask_sub_1_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_58 = atomics_a_mask_size_7 & atomics_a_mask_eq_58; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_58 = atomics_a_mask_sub_1_1_7 | _atomics_a_mask_acc_T_58; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_59 = atomics_a_mask_sub_1_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_59 = atomics_a_mask_size_7 & atomics_a_mask_eq_59; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_59 = atomics_a_mask_sub_1_1_7 | _atomics_a_mask_acc_T_59; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_60 = atomics_a_mask_sub_2_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_60 = atomics_a_mask_size_7 & atomics_a_mask_eq_60; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_60 = atomics_a_mask_sub_2_1_7 | _atomics_a_mask_acc_T_60; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_61 = atomics_a_mask_sub_2_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_61 = atomics_a_mask_size_7 & atomics_a_mask_eq_61; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_61 = atomics_a_mask_sub_2_1_7 | _atomics_a_mask_acc_T_61; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_62 = atomics_a_mask_sub_3_2_7 & atomics_a_mask_nbit_7; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_62 = atomics_a_mask_size_7 & atomics_a_mask_eq_62; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_62 = atomics_a_mask_sub_3_1_7 | _atomics_a_mask_acc_T_62; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_63 = atomics_a_mask_sub_3_2_7 & atomics_a_mask_bit_7; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_63 = atomics_a_mask_size_7 & atomics_a_mask_eq_63; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_63 = atomics_a_mask_sub_3_1_7 | _atomics_a_mask_acc_T_63; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_7 = {atomics_a_mask_acc_57, atomics_a_mask_acc_56}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_7 = {atomics_a_mask_acc_59, atomics_a_mask_acc_58}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_7 = {atomics_a_mask_lo_hi_7, atomics_a_mask_lo_lo_7}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_7 = {atomics_a_mask_acc_61, atomics_a_mask_acc_60}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_7 = {atomics_a_mask_acc_63, atomics_a_mask_acc_62}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_7 = {atomics_a_mask_hi_hi_7, atomics_a_mask_hi_lo_7}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_7 = {atomics_a_mask_hi_7, atomics_a_mask_lo_7}; // @[Misc.scala:222:10]
assign atomics_a_7_mask = _atomics_a_mask_T_7; // @[Misc.scala:222:10]
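// atomics_legal_8: address-legality check (same structure as atomics_legal_3 above).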
wire [40:0] _atomics_legal_T_485 = {1'h0, _atomics_legal_T_484}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_486 = _atomics_legal_T_485 & 41'hFFFD8000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_487 = _atomics_legal_T_486; // @[Parameters.scala:137:46]
wire _atomics_legal_T_488 = _atomics_legal_T_487 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_490 = {1'h0, _atomics_legal_T_489}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_491 = _atomics_legal_T_490 & 41'hFFFE9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_492 = _atomics_legal_T_491; // @[Parameters.scala:137:46]
wire _atomics_legal_T_493 = _atomics_legal_T_492 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_495 = {1'h0, _atomics_legal_T_494}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_496 = _atomics_legal_T_495 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_497 = _atomics_legal_T_496; // @[Parameters.scala:137:46]
wire _atomics_legal_T_498 = _atomics_legal_T_497 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_500 = {1'h0, _atomics_legal_T_499}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_501 = _atomics_legal_T_500 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_502 = _atomics_legal_T_501; // @[Parameters.scala:137:46]
wire _atomics_legal_T_503 = _atomics_legal_T_502 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_505 = {1'h0, _atomics_legal_T_504}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_506 = _atomics_legal_T_505 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_507 = _atomics_legal_T_506; // @[Parameters.scala:137:46]
wire _atomics_legal_T_508 = _atomics_legal_T_507 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_510 = {1'h0, _atomics_legal_T_509}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_511 = _atomics_legal_T_510 & 41'hFC000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_512 = _atomics_legal_T_511; // @[Parameters.scala:137:46]
wire _atomics_legal_T_513 = _atomics_legal_T_512 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_515 = {1'h0, _atomics_legal_T_514}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_516 = _atomics_legal_T_515 & 41'hFFFF9000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_517 = _atomics_legal_T_516; // @[Parameters.scala:137:46]
wire _atomics_legal_T_518 = _atomics_legal_T_517 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [40:0] _atomics_legal_T_520 = {1'h0, _atomics_legal_T_519}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_521 = _atomics_legal_T_520 & 41'hF0000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_522 = _atomics_legal_T_521; // @[Parameters.scala:137:46]
wire _atomics_legal_T_523 = _atomics_legal_T_522 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _atomics_legal_T_524 = _atomics_legal_T_488 | _atomics_legal_T_493; // @[Parameters.scala:685:42]
wire _atomics_legal_T_525 = _atomics_legal_T_524 | _atomics_legal_T_498; // @[Parameters.scala:685:42]
wire _atomics_legal_T_526 = _atomics_legal_T_525 | _atomics_legal_T_503; // @[Parameters.scala:685:42]
wire _atomics_legal_T_527 = _atomics_legal_T_526 | _atomics_legal_T_508; // @[Parameters.scala:685:42]
wire _atomics_legal_T_528 = _atomics_legal_T_527 | _atomics_legal_T_513; // @[Parameters.scala:685:42]
wire _atomics_legal_T_529 = _atomics_legal_T_528 | _atomics_legal_T_518; // @[Parameters.scala:685:42]
wire _atomics_legal_T_530 = _atomics_legal_T_529 | _atomics_legal_T_523; // @[Parameters.scala:685:42]
wire _atomics_legal_T_531 = _atomics_legal_T_530; // @[Parameters.scala:684:54, :685:42]
wire _atomics_legal_T_539 = _atomics_legal_T_531; // @[Parameters.scala:684:54, :686:26]
wire [40:0] _atomics_legal_T_534 = {1'h0, _atomics_legal_T_533}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _atomics_legal_T_535 = _atomics_legal_T_534 & 41'hFFFF0000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _atomics_legal_T_536 = _atomics_legal_T_535; // @[Parameters.scala:137:46]
wire _atomics_legal_T_537 = _atomics_legal_T_536 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire atomics_legal_8 = _atomics_legal_T_539; // @[Parameters.scala:686:26]
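  // Byte-lane mask generation (Misc.scala maskGen) for AMO variant 8: a one-hot decode of the
  // transfer size is fanned out over address bits [2:0] to build the 8-bit mask atomics_a_8_mask.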
wire [7:0] _atomics_a_mask_T_8; // @[Misc.scala:222:10]
wire [7:0] atomics_a_8_mask; // @[Edges.scala:517:17]
wire [1:0] atomics_a_mask_sizeOH_shiftAmount_8 = _atomics_a_mask_sizeOH_T_24[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _atomics_a_mask_sizeOH_T_25 = 4'h1 << atomics_a_mask_sizeOH_shiftAmount_8; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _atomics_a_mask_sizeOH_T_26 = _atomics_a_mask_sizeOH_T_25[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] atomics_a_mask_sizeOH_8 = {_atomics_a_mask_sizeOH_T_26[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire atomics_a_mask_sub_sub_sub_0_1_8 = &s2_req_size; // @[Misc.scala:206:21]
wire atomics_a_mask_sub_sub_size_8 = atomics_a_mask_sizeOH_8[2]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_sub_1_2_8 = atomics_a_mask_sub_sub_bit_8; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_sub_nbit_8 = ~atomics_a_mask_sub_sub_bit_8; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_sub_0_2_8 = atomics_a_mask_sub_sub_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_sub_acc_T_16 = atomics_a_mask_sub_sub_size_8 & atomics_a_mask_sub_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_0_1_8 = atomics_a_mask_sub_sub_sub_0_1_8 | _atomics_a_mask_sub_sub_acc_T_16; // @[Misc.scala:206:21, :215:{29,38}]
wire _atomics_a_mask_sub_sub_acc_T_17 = atomics_a_mask_sub_sub_size_8 & atomics_a_mask_sub_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_sub_1_1_8 = atomics_a_mask_sub_sub_sub_0_1_8 | _atomics_a_mask_sub_sub_acc_T_17; // @[Misc.scala:206:21, :215:{29,38}]
wire atomics_a_mask_sub_size_8 = atomics_a_mask_sizeOH_8[1]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_sub_nbit_8 = ~atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_sub_0_2_8 = atomics_a_mask_sub_sub_0_2_8 & atomics_a_mask_sub_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_32 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_0_1_8 = atomics_a_mask_sub_sub_0_1_8 | _atomics_a_mask_sub_acc_T_32; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_1_2_8 = atomics_a_mask_sub_sub_0_2_8 & atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_33 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_1_1_8 = atomics_a_mask_sub_sub_0_1_8 | _atomics_a_mask_sub_acc_T_33; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_2_2_8 = atomics_a_mask_sub_sub_1_2_8 & atomics_a_mask_sub_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_sub_acc_T_34 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_2_2_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_2_1_8 = atomics_a_mask_sub_sub_1_1_8 | _atomics_a_mask_sub_acc_T_34; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_sub_3_2_8 = atomics_a_mask_sub_sub_1_2_8 & atomics_a_mask_sub_bit_8; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_sub_acc_T_35 = atomics_a_mask_sub_size_8 & atomics_a_mask_sub_3_2_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_sub_3_1_8 = atomics_a_mask_sub_sub_1_1_8 | _atomics_a_mask_sub_acc_T_35; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_size_8 = atomics_a_mask_sizeOH_8[0]; // @[Misc.scala:202:81, :209:26]
wire atomics_a_mask_nbit_8 = ~atomics_a_mask_bit_8; // @[Misc.scala:210:26, :211:20]
wire atomics_a_mask_eq_64 = atomics_a_mask_sub_0_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_64 = atomics_a_mask_size_8 & atomics_a_mask_eq_64; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_64 = atomics_a_mask_sub_0_1_8 | _atomics_a_mask_acc_T_64; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_65 = atomics_a_mask_sub_0_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_65 = atomics_a_mask_size_8 & atomics_a_mask_eq_65; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_65 = atomics_a_mask_sub_0_1_8 | _atomics_a_mask_acc_T_65; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_66 = atomics_a_mask_sub_1_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_66 = atomics_a_mask_size_8 & atomics_a_mask_eq_66; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_66 = atomics_a_mask_sub_1_1_8 | _atomics_a_mask_acc_T_66; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_67 = atomics_a_mask_sub_1_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_67 = atomics_a_mask_size_8 & atomics_a_mask_eq_67; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_67 = atomics_a_mask_sub_1_1_8 | _atomics_a_mask_acc_T_67; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_68 = atomics_a_mask_sub_2_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_68 = atomics_a_mask_size_8 & atomics_a_mask_eq_68; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_68 = atomics_a_mask_sub_2_1_8 | _atomics_a_mask_acc_T_68; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_69 = atomics_a_mask_sub_2_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_69 = atomics_a_mask_size_8 & atomics_a_mask_eq_69; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_69 = atomics_a_mask_sub_2_1_8 | _atomics_a_mask_acc_T_69; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_70 = atomics_a_mask_sub_3_2_8 & atomics_a_mask_nbit_8; // @[Misc.scala:211:20, :214:27]
wire _atomics_a_mask_acc_T_70 = atomics_a_mask_size_8 & atomics_a_mask_eq_70; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_70 = atomics_a_mask_sub_3_1_8 | _atomics_a_mask_acc_T_70; // @[Misc.scala:215:{29,38}]
wire atomics_a_mask_eq_71 = atomics_a_mask_sub_3_2_8 & atomics_a_mask_bit_8; // @[Misc.scala:210:26, :214:27]
wire _atomics_a_mask_acc_T_71 = atomics_a_mask_size_8 & atomics_a_mask_eq_71; // @[Misc.scala:209:26, :214:27, :215:38]
wire atomics_a_mask_acc_71 = atomics_a_mask_sub_3_1_8 | _atomics_a_mask_acc_T_71; // @[Misc.scala:215:{29,38}]
wire [1:0] atomics_a_mask_lo_lo_8 = {atomics_a_mask_acc_65, atomics_a_mask_acc_64}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_lo_hi_8 = {atomics_a_mask_acc_67, atomics_a_mask_acc_66}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_lo_8 = {atomics_a_mask_lo_hi_8, atomics_a_mask_lo_lo_8}; // @[Misc.scala:222:10]
wire [1:0] atomics_a_mask_hi_lo_8 = {atomics_a_mask_acc_69, atomics_a_mask_acc_68}; // @[Misc.scala:215:29, :222:10]
wire [1:0] atomics_a_mask_hi_hi_8 = {atomics_a_mask_acc_71, atomics_a_mask_acc_70}; // @[Misc.scala:215:29, :222:10]
wire [3:0] atomics_a_mask_hi_8 = {atomics_a_mask_hi_hi_8, atomics_a_mask_hi_lo_8}; // @[Misc.scala:222:10]
assign _atomics_a_mask_T_8 = {atomics_a_mask_hi_8, atomics_a_mask_lo_8}; // @[Misc.scala:222:10]
assign atomics_a_8_mask = _atomics_a_mask_T_8; // @[Misc.scala:222:10]
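  // Priority mux (DCache.scala:587) selecting the TileLink AMO A-channel beat for the decoded s2
  // command: opcode 3'h3 = LogicalData (param 3 SWAP, 0 XOR, 1 OR, 2 AND), opcode 3'h2 =
  // ArithmeticData (param 4 ADD, 0 MIN, 1 MAX, 2 MINU, 3 MAXU).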
wire [2:0] _GEN_95 = _atomics_T ? 3'h3 : 3'h0; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_1_opcode; // @[DCache.scala:587:81]
assign _atomics_T_1_opcode = _GEN_95; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_1_param; // @[DCache.scala:587:81]
assign _atomics_T_1_param = _GEN_95; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_1_size = _atomics_T ? atomics_a_size : 4'h0; // @[Edges.scala:534:17]
wire _atomics_T_1_source = _atomics_T & atomics_a_source; // @[Edges.scala:534:17]
wire [31:0] _atomics_T_1_address = _atomics_T ? atomics_a_address : 32'h0; // @[Edges.scala:534:17]
wire [7:0] _atomics_T_1_mask = _atomics_T ? atomics_a_mask : 8'h0; // @[Edges.scala:534:17]
wire [63:0] _atomics_T_1_data = _atomics_T ? atomics_a_data : 64'h0; // @[Edges.scala:534:17]
wire [2:0] _atomics_T_3_opcode = _atomics_T_2 ? 3'h3 : _atomics_T_1_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_3_param = _atomics_T_2 ? 3'h0 : _atomics_T_1_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_3_size = _atomics_T_2 ? atomics_a_1_size : _atomics_T_1_size; // @[Edges.scala:534:17]
wire _atomics_T_3_source = _atomics_T_2 ? atomics_a_1_source : _atomics_T_1_source; // @[Edges.scala:534:17]
wire [31:0] _atomics_T_3_address = _atomics_T_2 ? atomics_a_1_address : _atomics_T_1_address; // @[Edges.scala:534:17]
wire [7:0] _atomics_T_3_mask = _atomics_T_2 ? atomics_a_1_mask : _atomics_T_1_mask; // @[Edges.scala:534:17]
wire [63:0] _atomics_T_3_data = _atomics_T_2 ? atomics_a_1_data : _atomics_T_1_data; // @[Edges.scala:534:17]
wire [2:0] _atomics_T_5_opcode = _atomics_T_4 ? 3'h3 : _atomics_T_3_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_5_param = _atomics_T_4 ? 3'h1 : _atomics_T_3_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_5_size = _atomics_T_4 ? atomics_a_2_size : _atomics_T_3_size; // @[Edges.scala:534:17]
wire _atomics_T_5_source = _atomics_T_4 ? atomics_a_2_source : _atomics_T_3_source; // @[Edges.scala:534:17]
wire [31:0] _atomics_T_5_address = _atomics_T_4 ? atomics_a_2_address : _atomics_T_3_address; // @[Edges.scala:534:17]
wire [7:0] _atomics_T_5_mask = _atomics_T_4 ? atomics_a_2_mask : _atomics_T_3_mask; // @[Edges.scala:534:17]
wire [63:0] _atomics_T_5_data = _atomics_T_4 ? atomics_a_2_data : _atomics_T_3_data; // @[Edges.scala:534:17]
wire [2:0] _atomics_T_7_opcode = _atomics_T_6 ? 3'h3 : _atomics_T_5_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_7_param = _atomics_T_6 ? 3'h2 : _atomics_T_5_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_7_size = _atomics_T_6 ? atomics_a_3_size : _atomics_T_5_size; // @[Edges.scala:534:17]
wire _atomics_T_7_source = _atomics_T_6 ? atomics_a_3_source : _atomics_T_5_source; // @[Edges.scala:534:17]
wire [31:0] _atomics_T_7_address = _atomics_T_6 ? atomics_a_3_address : _atomics_T_5_address; // @[Edges.scala:534:17]
wire [7:0] _atomics_T_7_mask = _atomics_T_6 ? atomics_a_3_mask : _atomics_T_5_mask; // @[Edges.scala:534:17]
wire [63:0] _atomics_T_7_data = _atomics_T_6 ? atomics_a_3_data : _atomics_T_5_data; // @[Edges.scala:534:17]
wire [2:0] _atomics_T_9_opcode = _atomics_T_8 ? 3'h2 : _atomics_T_7_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_9_param = _atomics_T_8 ? 3'h4 : _atomics_T_7_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_9_size = _atomics_T_8 ? atomics_a_4_size : _atomics_T_7_size; // @[Edges.scala:517:17]
wire _atomics_T_9_source = _atomics_T_8 ? atomics_a_4_source : _atomics_T_7_source; // @[Edges.scala:517:17]
wire [31:0] _atomics_T_9_address = _atomics_T_8 ? atomics_a_4_address : _atomics_T_7_address; // @[Edges.scala:517:17]
wire [7:0] _atomics_T_9_mask = _atomics_T_8 ? atomics_a_4_mask : _atomics_T_7_mask; // @[Edges.scala:517:17]
wire [63:0] _atomics_T_9_data = _atomics_T_8 ? atomics_a_4_data : _atomics_T_7_data; // @[Edges.scala:517:17]
wire [2:0] _atomics_T_11_opcode = _atomics_T_10 ? 3'h2 : _atomics_T_9_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_11_param = _atomics_T_10 ? 3'h0 : _atomics_T_9_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_11_size = _atomics_T_10 ? atomics_a_5_size : _atomics_T_9_size; // @[Edges.scala:517:17]
wire _atomics_T_11_source = _atomics_T_10 ? atomics_a_5_source : _atomics_T_9_source; // @[Edges.scala:517:17]
wire [31:0] _atomics_T_11_address = _atomics_T_10 ? atomics_a_5_address : _atomics_T_9_address; // @[Edges.scala:517:17]
wire [7:0] _atomics_T_11_mask = _atomics_T_10 ? atomics_a_5_mask : _atomics_T_9_mask; // @[Edges.scala:517:17]
wire [63:0] _atomics_T_11_data = _atomics_T_10 ? atomics_a_5_data : _atomics_T_9_data; // @[Edges.scala:517:17]
wire [2:0] _atomics_T_13_opcode = _atomics_T_12 ? 3'h2 : _atomics_T_11_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_13_param = _atomics_T_12 ? 3'h1 : _atomics_T_11_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_13_size = _atomics_T_12 ? atomics_a_6_size : _atomics_T_11_size; // @[Edges.scala:517:17]
wire _atomics_T_13_source = _atomics_T_12 ? atomics_a_6_source : _atomics_T_11_source; // @[Edges.scala:517:17]
wire [31:0] _atomics_T_13_address = _atomics_T_12 ? atomics_a_6_address : _atomics_T_11_address; // @[Edges.scala:517:17]
wire [7:0] _atomics_T_13_mask = _atomics_T_12 ? atomics_a_6_mask : _atomics_T_11_mask; // @[Edges.scala:517:17]
wire [63:0] _atomics_T_13_data = _atomics_T_12 ? atomics_a_6_data : _atomics_T_11_data; // @[Edges.scala:517:17]
wire [2:0] _atomics_T_15_opcode = _atomics_T_14 ? 3'h2 : _atomics_T_13_opcode; // @[DCache.scala:587:81]
wire [2:0] _atomics_T_15_param = _atomics_T_14 ? 3'h2 : _atomics_T_13_param; // @[DCache.scala:587:81]
wire [3:0] _atomics_T_15_size = _atomics_T_14 ? atomics_a_7_size : _atomics_T_13_size; // @[Edges.scala:517:17]
wire _atomics_T_15_source = _atomics_T_14 ? atomics_a_7_source : _atomics_T_13_source; // @[Edges.scala:517:17]
wire [31:0] _atomics_T_15_address = _atomics_T_14 ? atomics_a_7_address : _atomics_T_13_address; // @[Edges.scala:517:17]
wire [7:0] _atomics_T_15_mask = _atomics_T_14 ? atomics_a_7_mask : _atomics_T_13_mask; // @[Edges.scala:517:17]
wire [63:0] _atomics_T_15_data = _atomics_T_14 ? atomics_a_7_data : _atomics_T_13_data; // @[Edges.scala:517:17]
wire [2:0] atomics_opcode = _atomics_T_16 ? 3'h2 : _atomics_T_15_opcode; // @[DCache.scala:587:81]
wire [2:0] atomics_param = _atomics_T_16 ? 3'h3 : _atomics_T_15_param; // @[DCache.scala:587:81]
wire [3:0] atomics_size = _atomics_T_16 ? atomics_a_8_size : _atomics_T_15_size; // @[Edges.scala:517:17]
wire atomics_source = _atomics_T_16 ? atomics_a_8_source : _atomics_T_15_source; // @[Edges.scala:517:17]
wire [31:0] atomics_address = _atomics_T_16 ? atomics_a_8_address : _atomics_T_15_address; // @[Edges.scala:517:17]
wire [7:0] atomics_mask = _atomics_T_16 ? atomics_a_8_mask : _atomics_T_15_mask; // @[Edges.scala:517:17]
wire [63:0] atomics_data = _atomics_T_16 ? atomics_a_8_data : _atomics_T_15_data; // @[Edges.scala:517:17]
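  // A-channel valid (DCache.scala:603-607): fire for a pending uncached access, or for a cached
  // miss that may Acquire now -- blocked while a ReleaseAck for the same cache index is still
  // outstanding or while the victim line is dirty (its writeback must be issued first).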
wire [39:0] _tl_out_a_valid_T_1 = {s2_req_addr[39:32], s2_req_addr[31:0] ^ release_ack_addr}; // @[DCache.scala:227:29, :339:19, :606:43]
wire [14:0] _tl_out_a_valid_T_2 = _tl_out_a_valid_T_1[20:6]; // @[DCache.scala:606:{43,62}]
wire _tl_out_a_valid_T_3 = _tl_out_a_valid_T_2 == 15'h0; // @[DCache.scala:582:29, :606:{62,118}]
wire _tl_out_a_valid_T_4 = release_ack_wait & _tl_out_a_valid_T_3; // @[DCache.scala:226:33, :606:{27,118}]
wire _tl_out_a_valid_T_5 = ~_tl_out_a_valid_T_4; // @[DCache.scala:606:{8,27}]
wire _tl_out_a_valid_T_6 = s2_valid_cached_miss & _tl_out_a_valid_T_5; // @[DCache.scala:425:60, :605:29, :606:8]
wire _tl_out_a_valid_T_7 = ~release_ack_wait; // @[DCache.scala:226:33, :607:47]
wire _tl_out_a_valid_T_10 = ~s2_victim_dirty; // @[Misc.scala:38:9]
wire _tl_out_a_valid_T_11 = _tl_out_a_valid_T_10; // @[DCache.scala:607:{88,91}]
wire _tl_out_a_valid_T_12 = _tl_out_a_valid_T_6 & _tl_out_a_valid_T_11; // @[DCache.scala:605:29, :606:127, :607:88]
wire _tl_out_a_valid_T_13 = s2_valid_uncached_pending | _tl_out_a_valid_T_12; // @[DCache.scala:430:64, :604:32, :606:127]
assign _tl_out_a_valid_T_14 = _tl_out_a_valid_T_13; // @[DCache.scala:603:37, :604:32]
assign tl_out_a_valid = _tl_out_a_valid_T_14; // @[DCache.scala:159:22, :603:37]
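  // Block-aligned Acquire address ({block address, 6'h0}) and its address-map legality check
  // (Parameters.scala), plus the grow-permissions param and byte-mask terms used when the A
  // channel carries an AcquireBlock for a cached miss.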
wire _tl_out_a_bits_T = ~s2_uncached; // @[DCache.scala:424:39, :425:47, :608:24]
wire [39:0] _tl_out_a_bits_T_2 = {_tl_out_a_bits_T_1, 6'h0}; // @[DCache.scala:1210:{39,60}]
wire [39:0] _tl_out_a_bits_legal_T_1 = _tl_out_a_bits_T_2; // @[DCache.scala:1210:60]
wire [40:0] _tl_out_a_bits_legal_T_2 = {1'h0, _tl_out_a_bits_legal_T_1}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _tl_out_a_bits_legal_T_3 = _tl_out_a_bits_legal_T_2 & 41'h8C020000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _tl_out_a_bits_legal_T_4 = _tl_out_a_bits_legal_T_3; // @[Parameters.scala:137:46]
wire _tl_out_a_bits_legal_T_5 = _tl_out_a_bits_legal_T_4 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _tl_out_a_bits_legal_T_6 = {_tl_out_a_bits_T_2[39:17], _tl_out_a_bits_T_2[16:0] ^ 17'h10000}; // @[DCache.scala:1210:60]
wire [40:0] _tl_out_a_bits_legal_T_7 = {1'h0, _tl_out_a_bits_legal_T_6}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _tl_out_a_bits_legal_T_8 = _tl_out_a_bits_legal_T_7 & 41'h8C031000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _tl_out_a_bits_legal_T_9 = _tl_out_a_bits_legal_T_8; // @[Parameters.scala:137:46]
wire _tl_out_a_bits_legal_T_10 = _tl_out_a_bits_legal_T_9 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _tl_out_a_bits_legal_T_11 = {_tl_out_a_bits_T_2[39:18], _tl_out_a_bits_T_2[17:0] ^ 18'h20000}; // @[DCache.scala:1210:60]
wire [40:0] _tl_out_a_bits_legal_T_12 = {1'h0, _tl_out_a_bits_legal_T_11}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _tl_out_a_bits_legal_T_13 = _tl_out_a_bits_legal_T_12 & 41'h8C030000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _tl_out_a_bits_legal_T_14 = _tl_out_a_bits_legal_T_13; // @[Parameters.scala:137:46]
wire _tl_out_a_bits_legal_T_15 = _tl_out_a_bits_legal_T_14 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [39:0] _tl_out_a_bits_legal_T_16 = {_tl_out_a_bits_T_2[39:28], _tl_out_a_bits_T_2[27:0] ^ 28'hC000000}; // @[DCache.scala:1210:60]
wire [40:0] _tl_out_a_bits_legal_T_17 = {1'h0, _tl_out_a_bits_legal_T_16}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _tl_out_a_bits_legal_T_18 = _tl_out_a_bits_legal_T_17 & 41'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _tl_out_a_bits_legal_T_19 = _tl_out_a_bits_legal_T_18; // @[Parameters.scala:137:46]
wire _tl_out_a_bits_legal_T_20 = _tl_out_a_bits_legal_T_19 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _tl_out_a_bits_legal_T_21 = _tl_out_a_bits_legal_T_5 | _tl_out_a_bits_legal_T_10; // @[Parameters.scala:685:42]
wire _tl_out_a_bits_legal_T_22 = _tl_out_a_bits_legal_T_21 | _tl_out_a_bits_legal_T_15; // @[Parameters.scala:685:42]
wire _tl_out_a_bits_legal_T_23 = _tl_out_a_bits_legal_T_22 | _tl_out_a_bits_legal_T_20; // @[Parameters.scala:685:42]
wire [39:0] _tl_out_a_bits_legal_T_27 = {_tl_out_a_bits_T_2[39:28], _tl_out_a_bits_T_2[27:0] ^ 28'h8000000}; // @[DCache.scala:1210:60]
wire [40:0] _tl_out_a_bits_legal_T_28 = {1'h0, _tl_out_a_bits_legal_T_27}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _tl_out_a_bits_legal_T_29 = _tl_out_a_bits_legal_T_28 & 41'h8C030000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _tl_out_a_bits_legal_T_30 = _tl_out_a_bits_legal_T_29; // @[Parameters.scala:137:46]
wire _tl_out_a_bits_legal_T_31 = _tl_out_a_bits_legal_T_30 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] tl_out_a_bits_a_address = _tl_out_a_bits_T_2[31:0]; // @[Edges.scala:346:17]
wire [39:0] _tl_out_a_bits_legal_T_32 = {_tl_out_a_bits_T_2[39:32], tl_out_a_bits_a_address ^ 32'h80000000}; // @[Edges.scala:346:17]
wire [40:0] _tl_out_a_bits_legal_T_33 = {1'h0, _tl_out_a_bits_legal_T_32}; // @[Parameters.scala:137:{31,41}]
wire [40:0] _tl_out_a_bits_legal_T_34 = _tl_out_a_bits_legal_T_33 & 41'h80000000; // @[Parameters.scala:137:{41,46}]
wire [40:0] _tl_out_a_bits_legal_T_35 = _tl_out_a_bits_legal_T_34; // @[Parameters.scala:137:46]
wire _tl_out_a_bits_legal_T_36 = _tl_out_a_bits_legal_T_35 == 41'h0; // @[Parameters.scala:137:{46,59}]
wire _tl_out_a_bits_legal_T_37 = _tl_out_a_bits_legal_T_31 | _tl_out_a_bits_legal_T_36; // @[Parameters.scala:685:42]
wire _tl_out_a_bits_legal_T_38 = _tl_out_a_bits_legal_T_37; // @[Parameters.scala:684:54, :685:42]
wire tl_out_a_bits_legal = _tl_out_a_bits_legal_T_38; // @[Parameters.scala:684:54, :686:26]
wire [2:0] tl_out_a_bits_a_param; // @[Edges.scala:346:17]
assign tl_out_a_bits_a_param = {1'h0, s2_grow_param}; // @[Misc.scala:35:36]
wire tl_out_a_bits_a_mask_sub_sub_bit = _tl_out_a_bits_T_2[2]; // @[Misc.scala:210:26]
wire tl_out_a_bits_a_mask_sub_sub_1_2 = tl_out_a_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire tl_out_a_bits_a_mask_sub_sub_nbit = ~tl_out_a_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire tl_out_a_bits_a_mask_sub_sub_0_2 = tl_out_a_bits_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _tl_out_a_bits_a_mask_sub_sub_acc_T = tl_out_a_bits_a_mask_sub_sub_0_2; // @[Misc.scala:214:27, :215:38]
wire _tl_out_a_bits_a_mask_sub_sub_acc_T_1 = tl_out_a_bits_a_mask_sub_sub_1_2; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_sub_bit = _tl_out_a_bits_T_2[1]; // @[Misc.scala:210:26]
wire tl_out_a_bits_a_mask_sub_nbit = ~tl_out_a_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire tl_out_a_bits_a_mask_sub_0_2 = tl_out_a_bits_a_mask_sub_sub_0_2 & tl_out_a_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire tl_out_a_bits_a_mask_sub_1_2 = tl_out_a_bits_a_mask_sub_sub_0_2 & tl_out_a_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire tl_out_a_bits_a_mask_sub_2_2 = tl_out_a_bits_a_mask_sub_sub_1_2 & tl_out_a_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire tl_out_a_bits_a_mask_sub_3_2 = tl_out_a_bits_a_mask_sub_sub_1_2 & tl_out_a_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire tl_out_a_bits_a_mask_bit = _tl_out_a_bits_T_2[0]; // @[Misc.scala:210:26]
wire tl_out_a_bits_a_mask_nbit = ~tl_out_a_bits_a_mask_bit; // @[Misc.scala:210:26, :211:20]
wire tl_out_a_bits_a_mask_eq = tl_out_a_bits_a_mask_sub_0_2 & tl_out_a_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _tl_out_a_bits_a_mask_acc_T = tl_out_a_bits_a_mask_eq; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_1 = tl_out_a_bits_a_mask_sub_0_2 & tl_out_a_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_1 = tl_out_a_bits_a_mask_eq_1; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_2 = tl_out_a_bits_a_mask_sub_1_2 & tl_out_a_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_2 = tl_out_a_bits_a_mask_eq_2; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_3 = tl_out_a_bits_a_mask_sub_1_2 & tl_out_a_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_3 = tl_out_a_bits_a_mask_eq_3; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_4 = tl_out_a_bits_a_mask_sub_2_2 & tl_out_a_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_4 = tl_out_a_bits_a_mask_eq_4; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_5 = tl_out_a_bits_a_mask_sub_2_2 & tl_out_a_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_5 = tl_out_a_bits_a_mask_eq_5; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_6 = tl_out_a_bits_a_mask_sub_3_2 & tl_out_a_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_6 = tl_out_a_bits_a_mask_eq_6; // @[Misc.scala:214:27, :215:38]
wire tl_out_a_bits_a_mask_eq_7 = tl_out_a_bits_a_mask_sub_3_2 & tl_out_a_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _tl_out_a_bits_a_mask_acc_T_7 = tl_out_a_bits_a_mask_eq_7; // @[Misc.scala:214:27, :215:38]
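  // Final A-channel payload select (DCache.scala:608-611): AcquireBlock (opcode 6, full 64-byte
  // block, mask 8'hFF) for cached misses; for uncached accesses, Get (4) when not a write,
  // PutPartialData (1) for partial-mask writes, PutFullData (0) for plain writes, otherwise the
  // AMO beat chosen above.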
wire _tl_out_a_bits_T_3 = ~s2_write; // @[DCache.scala:609:9]
wire _tl_out_a_bits_T_5 = ~s2_read; // @[DCache.scala:611:9]
wire [2:0] _tl_out_a_bits_T_6_opcode = _tl_out_a_bits_T_5 ? 3'h0 : atomics_opcode; // @[DCache.scala:587:81, :611:{8,9}]
wire [2:0] _tl_out_a_bits_T_6_param = _tl_out_a_bits_T_5 ? 3'h0 : atomics_param; // @[DCache.scala:587:81, :611:{8,9}]
wire [3:0] _tl_out_a_bits_T_6_size = _tl_out_a_bits_T_5 ? put_size : atomics_size; // @[Edges.scala:480:17]
wire _tl_out_a_bits_T_6_source = _tl_out_a_bits_T_5 ? put_source : atomics_source; // @[Edges.scala:480:17]
wire [31:0] _tl_out_a_bits_T_6_address = _tl_out_a_bits_T_5 ? put_address : atomics_address; // @[Edges.scala:480:17]
wire [7:0] _tl_out_a_bits_T_6_mask = _tl_out_a_bits_T_5 ? put_mask : atomics_mask; // @[Edges.scala:480:17]
wire [63:0] _tl_out_a_bits_T_6_data = _tl_out_a_bits_T_5 ? put_data : atomics_data; // @[Edges.scala:480:17]
wire [2:0] _tl_out_a_bits_T_7_opcode = _tl_out_a_bits_T_4 ? 3'h1 : _tl_out_a_bits_T_6_opcode; // @[DCache.scala:610:{8,20}, :611:8]
wire [2:0] _tl_out_a_bits_T_7_param = _tl_out_a_bits_T_4 ? 3'h0 : _tl_out_a_bits_T_6_param; // @[DCache.scala:610:{8,20}, :611:8]
wire [3:0] _tl_out_a_bits_T_7_size = _tl_out_a_bits_T_4 ? putpartial_size : _tl_out_a_bits_T_6_size; // @[Edges.scala:500:17]
wire _tl_out_a_bits_T_7_source = _tl_out_a_bits_T_4 ? putpartial_source : _tl_out_a_bits_T_6_source; // @[Edges.scala:500:17]
wire [31:0] _tl_out_a_bits_T_7_address = _tl_out_a_bits_T_4 ? putpartial_address : _tl_out_a_bits_T_6_address; // @[Edges.scala:500:17]
wire [7:0] _tl_out_a_bits_T_7_mask = _tl_out_a_bits_T_4 ? putpartial_mask : _tl_out_a_bits_T_6_mask; // @[Edges.scala:500:17]
wire [63:0] _tl_out_a_bits_T_7_data = _tl_out_a_bits_T_4 ? putpartial_data : _tl_out_a_bits_T_6_data; // @[Edges.scala:500:17]
wire [2:0] _tl_out_a_bits_T_8_opcode = _tl_out_a_bits_T_3 ? 3'h4 : _tl_out_a_bits_T_7_opcode; // @[DCache.scala:609:{8,9}, :610:8]
wire [2:0] _tl_out_a_bits_T_8_param = _tl_out_a_bits_T_3 ? 3'h0 : _tl_out_a_bits_T_7_param; // @[DCache.scala:609:{8,9}, :610:8]
wire [3:0] _tl_out_a_bits_T_8_size = _tl_out_a_bits_T_3 ? get_size : _tl_out_a_bits_T_7_size; // @[Edges.scala:460:17]
wire _tl_out_a_bits_T_8_source = _tl_out_a_bits_T_3 ? get_source : _tl_out_a_bits_T_7_source; // @[Edges.scala:460:17]
wire [31:0] _tl_out_a_bits_T_8_address = _tl_out_a_bits_T_3 ? get_address : _tl_out_a_bits_T_7_address; // @[Edges.scala:460:17]
wire [7:0] _tl_out_a_bits_T_8_mask = _tl_out_a_bits_T_3 ? get_mask : _tl_out_a_bits_T_7_mask; // @[Edges.scala:460:17]
wire [63:0] _tl_out_a_bits_T_8_data = _tl_out_a_bits_T_3 ? 64'h0 : _tl_out_a_bits_T_7_data; // @[DCache.scala:609:{8,9}, :610:8]
assign _tl_out_a_bits_T_9_opcode = _tl_out_a_bits_T ? 3'h6 : _tl_out_a_bits_T_8_opcode; // @[DCache.scala:608:{23,24}, :609:8]
assign _tl_out_a_bits_T_9_param = _tl_out_a_bits_T ? tl_out_a_bits_a_param : _tl_out_a_bits_T_8_param; // @[Edges.scala:346:17]
assign _tl_out_a_bits_T_9_size = _tl_out_a_bits_T ? 4'h6 : _tl_out_a_bits_T_8_size; // @[DCache.scala:608:{23,24}, :609:8]
assign _tl_out_a_bits_T_9_source = ~_tl_out_a_bits_T & _tl_out_a_bits_T_8_source; // @[DCache.scala:608:{23,24}, :609:8]
assign _tl_out_a_bits_T_9_address = _tl_out_a_bits_T ? tl_out_a_bits_a_address : _tl_out_a_bits_T_8_address; // @[Edges.scala:346:17]
assign _tl_out_a_bits_T_9_mask = _tl_out_a_bits_T ? 8'hFF : _tl_out_a_bits_T_8_mask; // @[DCache.scala:608:{23,24}, :609:8]
assign _tl_out_a_bits_T_9_data = _tl_out_a_bits_T ? 64'h0 : _tl_out_a_bits_T_8_data; // @[DCache.scala:608:{23,24}, :609:8]
assign tl_out_a_bits_opcode = _tl_out_a_bits_T_9_opcode; // @[DCache.scala:159:22, :608:23]
assign tl_out_a_bits_param = _tl_out_a_bits_T_9_param; // @[DCache.scala:159:22, :608:23]
assign tl_out_a_bits_size = _tl_out_a_bits_T_9_size; // @[DCache.scala:159:22, :608:23]
assign tl_out_a_bits_source = _tl_out_a_bits_T_9_source; // @[DCache.scala:159:22, :608:23]
assign tl_out_a_bits_address = _tl_out_a_bits_T_9_address; // @[DCache.scala:159:22, :608:23]
assign tl_out_a_bits_mask = _tl_out_a_bits_T_9_mask; // @[DCache.scala:159:22, :608:23]
assign tl_out_a_bits_data = _tl_out_a_bits_T_9_data; // @[DCache.scala:159:22, :608:23]
wire [1:0] _a_sel_T = 2'h1 << a_sel_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [1:0] _a_sel_T_1 = _a_sel_T; // @[OneHot.scala:65:{12,27}]
wire a_sel = _a_sel_T_1[1]; // @[OneHot.scala:65:27]
wire _io_cpu_perf_acquire_T = tl_out_a_ready & tl_out_a_valid; // @[Decoupled.scala:51:35]
wire [4:0] _uncachedReqs_0_cmd_T_1 = {_uncachedReqs_0_cmd_T, 4'h1}; // @[DCache.scala:637:{37,49}]
wire [4:0] _uncachedReqs_0_cmd_T_2 = s2_write ? _uncachedReqs_0_cmd_T_1 : 5'h0; // @[DCache.scala:637:{23,37}]
wire _T_82 = nodeOut_d_ready & nodeOut_d_valid; // @[Decoupled.scala:51:35]
wire _io_cpu_replay_next_T; // @[Decoupled.scala:51:35]
assign _io_cpu_replay_next_T = _T_82; // @[Decoupled.scala:51:35]
wire _io_cpu_perf_blocked_near_end_of_refill_T; // @[Decoupled.scala:51:35]
assign _io_cpu_perf_blocked_near_end_of_refill_T = _T_82; // @[Decoupled.scala:51:35]
wire _io_errors_bus_valid_T; // @[Decoupled.scala:51:35]
assign _io_errors_bus_valid_T = _T_82; // @[Decoupled.scala:51:35]
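  // D-channel beat counter (Edges.scala:220-236): beats per message decoded from d.bits.size,
  // producing d_first / d_last / d_done and d_address_inc, which steps the refill write address.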
wire [26:0] _r_beats1_decode_T = 27'hFFF << nodeOut_d_bits_size; // @[package.scala:243:71]
wire [11:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] r_beats1_decode = _r_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire r_beats1_opdata = nodeOut_d_bits_opcode[0]; // @[Edges.scala:106:36]
wire [8:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] r_counter; // @[Edges.scala:229:27]
wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_last = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_done = d_last & _T_82; // @[Decoupled.scala:51:35]
wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] r_4 = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _r_counter_T = d_first ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] d_address_inc = {r_4, 3'h0}; // @[Edges.scala:234:25, :269:29]
wire grantIsUncachedData = nodeOut_d_bits_opcode == 3'h1; // @[package.scala:16:47]
wire grantIsUncached = grantIsUncachedData | nodeOut_d_bits_opcode == 3'h0 | nodeOut_d_bits_opcode == 3'h2; // @[package.scala:16:47, :81:59]
wire _tl_d_data_encoded_T_11 = ~grantIsUncached; // @[package.scala:81:59]
wire _tl_d_data_encoded_T_12 = _tl_d_data_encoded_T_10 & _tl_d_data_encoded_T_11; // @[DCache.scala:663:{77,126,129}]
wire [15:0] tl_d_data_encoded_lo_lo_1 = {_tl_d_data_encoded_T_14, _tl_d_data_encoded_T_13}; // @[package.scala:45:27, :211:50]
wire [15:0] tl_d_data_encoded_lo_hi_1 = {_tl_d_data_encoded_T_16, _tl_d_data_encoded_T_15}; // @[package.scala:45:27, :211:50]
wire [31:0] tl_d_data_encoded_lo_1 = {tl_d_data_encoded_lo_hi_1, tl_d_data_encoded_lo_lo_1}; // @[package.scala:45:27]
wire [15:0] tl_d_data_encoded_hi_lo_1 = {_tl_d_data_encoded_T_18, _tl_d_data_encoded_T_17}; // @[package.scala:45:27, :211:50]
wire [15:0] tl_d_data_encoded_hi_hi_1 = {_tl_d_data_encoded_T_20, _tl_d_data_encoded_T_19}; // @[package.scala:45:27, :211:50]
wire [31:0] tl_d_data_encoded_hi_1 = {tl_d_data_encoded_hi_hi_1, tl_d_data_encoded_hi_lo_1}; // @[package.scala:45:27]
assign _tl_d_data_encoded_T_21 = {tl_d_data_encoded_hi_1, tl_d_data_encoded_lo_1}; // @[package.scala:45:27]
assign tl_d_data_encoded = _tl_d_data_encoded_T_21; // @[package.scala:45:27]
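  // Classify the D-channel message: uncached responses (AccessAck 0, AccessAckData 1, HintAck 2,
  // decoded above), cached grants (Grant 4, GrantData 5 = refill data), and ReleaseAck (6)
  // acknowledging a voluntary writeback.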
wire _grantIsCached_T = nodeOut_d_bits_opcode == 3'h4; // @[package.scala:16:47]
wire _GEN_96 = nodeOut_d_bits_opcode == 3'h5; // @[package.scala:16:47]
wire _grantIsCached_T_1; // @[package.scala:16:47]
assign _grantIsCached_T_1 = _GEN_96; // @[package.scala:16:47]
wire grantIsRefill; // @[DCache.scala:666:29]
assign grantIsRefill = _GEN_96; // @[package.scala:16:47]
wire grantIsCached = _grantIsCached_T | _grantIsCached_T_1; // @[package.scala:16:47, :81:59]
wire grantIsVoluntary = nodeOut_d_bits_opcode == 3'h6; // @[DCache.scala:665:32]
reg grantInProgress; // @[DCache.scala:667:32]
reg [2:0] blockProbeAfterGrantCount; // @[DCache.scala:668:42]
wire [3:0] _blockProbeAfterGrantCount_T = {1'h0, blockProbeAfterGrantCount} - 4'h1; // @[DCache.scala:668:42, :669:99]
wire [2:0] _blockProbeAfterGrantCount_T_1 = _blockProbeAfterGrantCount_T[2:0]; // @[DCache.scala:669:99]
wire _T_107 = release_state == 4'h6; // @[package.scala:16:47]
wire _canAcceptCachedGrant_T_1; // @[package.scala:16:47]
assign _canAcceptCachedGrant_T_1 = _T_107; // @[package.scala:16:47]
wire _metaArb_io_in_4_valid_T; // @[package.scala:16:47]
assign _metaArb_io_in_4_valid_T = _T_107; // @[package.scala:16:47]
wire _T_111 = release_state == 4'h9; // @[package.scala:16:47]
wire _canAcceptCachedGrant_T_2; // @[package.scala:16:47]
assign _canAcceptCachedGrant_T_2 = _T_111; // @[package.scala:16:47]
wire _nodeOut_c_valid_T_1; // @[DCache.scala:810:91]
assign _nodeOut_c_valid_T_1 = _T_111; // @[package.scala:16:47]
wire _canAcceptCachedGrant_T_3 = _canAcceptCachedGrant_T | _canAcceptCachedGrant_T_1; // @[package.scala:16:47, :81:59]
wire _canAcceptCachedGrant_T_4 = _canAcceptCachedGrant_T_3 | _canAcceptCachedGrant_T_2; // @[package.scala:16:47, :81:59]
wire canAcceptCachedGrant = ~_canAcceptCachedGrant_T_4; // @[package.scala:81:59]
wire _nodeOut_d_ready_T = ~d_first; // @[Edges.scala:231:25]
wire _nodeOut_d_ready_T_1 = _nodeOut_d_ready_T | nodeOut_e_ready; // @[DCache.scala:671:{41,50}]
wire _nodeOut_d_ready_T_2 = _nodeOut_d_ready_T_1 & canAcceptCachedGrant; // @[DCache.scala:670:30, :671:{50,69}]
wire _nodeOut_d_ready_T_3 = ~grantIsCached | _nodeOut_d_ready_T_2; // @[package.scala:81:59]
wire [1:0] _uncachedRespIdxOH_T = 2'h1 << uncachedRespIdxOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [1:0] _uncachedRespIdxOH_T_1 = _uncachedRespIdxOH_T; // @[OneHot.scala:65:{12,27}]
wire uncachedRespIdxOH = _uncachedRespIdxOH_T_1[1]; // @[OneHot.scala:65:27]
wire _uncachedResp_T = uncachedRespIdxOH; // @[Mux.scala:32:36]
wire _GEN_97 = _T_82 & grantIsCached; // @[Decoupled.scala:51:35]
assign replace = _GEN_97 & d_last; // @[Replacement.scala:37:29, :38:11]
wire _T_74 = uncachedRespIdxOH & d_last; // @[Edges.scala:232:33]
assign s1_data_way = ~_T_82 | grantIsCached | ~(grantIsUncached & grantIsUncachedData) ? {1'h0, _s1_data_way_T} : 5'h10; // @[Decoupled.scala:51:35]
wire [28:0] _s2_req_addr_dontCareBits_T = s1_paddr[31:3]; // @[DCache.scala:298:21, :701:41]
wire [31:0] s2_req_addr_dontCareBits = {_s2_req_addr_dontCareBits_T, 3'h0}; // @[DCache.scala:701:{41,55}]
wire [2:0] _s2_req_addr_T = uncachedResp_addr[2:0]; // @[DCache.scala:238:30, :702:45]
wire [31:0] _s2_req_addr_T_1 = {s2_req_addr_dontCareBits[31:3], s2_req_addr_dontCareBits[2:0] | _s2_req_addr_T}; // @[DCache.scala:701:55, :702:{26,45}]
wire _nodeOut_e_valid_T = nodeOut_d_valid & d_first; // @[Edges.scala:231:25]
wire _nodeOut_e_valid_T_1 = _nodeOut_e_valid_T & grantIsCached; // @[package.scala:81:59]
wire _nodeOut_e_valid_T_2 = _nodeOut_e_valid_T_1 & canAcceptCachedGrant; // @[DCache.scala:670:30, :714:{47,64}]
assign nodeOut_e_bits_sink = nodeOut_e_bits_e_sink; // @[Edges.scala:451:17]
wire _dataArb_io_in_1_valid_T = nodeOut_d_valid & grantIsRefill; // @[DCache.scala:666:29, :721:44]
wire _dataArb_io_in_1_valid_T_1 = _dataArb_io_in_1_valid_T & canAcceptCachedGrant; // @[DCache.scala:670:30, :721:{44,61}]
wire _T_90 = grantIsRefill & ~dataArb_io_in_1_ready; // @[DCache.scala:152:28, :666:29, :722:{23,26}]
assign nodeOut_e_valid = ~_T_90 & _nodeOut_e_valid_T_2; // @[DCache.scala:714:{18,64}, :722:{23,51}, :723:20]
wire [33:0] _dataArb_io_in_1_bits_addr_T = s2_vaddr[39:6]; // @[DCache.scala:351:21, :728:46]
wire [39:0] _dataArb_io_in_1_bits_addr_T_1 = {_dataArb_io_in_1_bits_addr_T, 6'h0}; // @[DCache.scala:728:{46,57}]
wire [39:0] _dataArb_io_in_1_bits_addr_T_2 = {_dataArb_io_in_1_bits_addr_T_1[39:12], _dataArb_io_in_1_bits_addr_T_1[11:0] | d_address_inc}; // @[Edges.scala:269:29]
assign dataArb_io_in_1_bits_addr = _dataArb_io_in_1_bits_addr_T_2[7:0]; // @[DCache.scala:152:28, :728:{32,67}]
wire _metaArb_io_in_3_valid_T = grantIsCached & d_done; // @[package.scala:81:59]
wire _metaArb_io_in_3_valid_T_1 = ~nodeOut_d_bits_denied; // @[DCache.scala:741:56]
assign _metaArb_io_in_3_valid_T_2 = _metaArb_io_in_3_valid_T & _metaArb_io_in_3_valid_T_1; // @[DCache.scala:741:{43,53,56}]
assign metaArb_io_in_3_valid = _metaArb_io_in_3_valid_T_2; // @[DCache.scala:135:28, :741:53]
assign metaArb_io_in_3_bits_idx = _metaArb_io_in_3_bits_idx_T; // @[DCache.scala:135:28, :744:40]
assign _metaArb_io_in_3_bits_addr_T_2 = {_metaArb_io_in_3_bits_addr_T, _metaArb_io_in_3_bits_addr_T_1}; // @[DCache.scala:745:{36,58,80}]
assign metaArb_io_in_3_bits_addr = _metaArb_io_in_3_bits_addr_T_2; // @[DCache.scala:135:28, :745:36]
wire _metaArb_io_in_3_bits_data_c_cat_T_2 = _metaArb_io_in_3_bits_data_c_cat_T | _metaArb_io_in_3_bits_data_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _metaArb_io_in_3_bits_data_c_cat_T_4 = _metaArb_io_in_3_bits_data_c_cat_T_2 | _metaArb_io_in_3_bits_data_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _metaArb_io_in_3_bits_data_c_cat_T_9 = _metaArb_io_in_3_bits_data_c_cat_T_5 | _metaArb_io_in_3_bits_data_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_10 = _metaArb_io_in_3_bits_data_c_cat_T_9 | _metaArb_io_in_3_bits_data_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_11 = _metaArb_io_in_3_bits_data_c_cat_T_10 | _metaArb_io_in_3_bits_data_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_17 = _metaArb_io_in_3_bits_data_c_cat_T_12 | _metaArb_io_in_3_bits_data_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_18 = _metaArb_io_in_3_bits_data_c_cat_T_17 | _metaArb_io_in_3_bits_data_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_19 = _metaArb_io_in_3_bits_data_c_cat_T_18 | _metaArb_io_in_3_bits_data_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_20 = _metaArb_io_in_3_bits_data_c_cat_T_19 | _metaArb_io_in_3_bits_data_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_21 = _metaArb_io_in_3_bits_data_c_cat_T_11 | _metaArb_io_in_3_bits_data_c_cat_T_20; // @[package.scala:81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_22 = _metaArb_io_in_3_bits_data_c_cat_T_4 | _metaArb_io_in_3_bits_data_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _metaArb_io_in_3_bits_data_c_cat_T_25 = _metaArb_io_in_3_bits_data_c_cat_T_23 | _metaArb_io_in_3_bits_data_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _metaArb_io_in_3_bits_data_c_cat_T_27 = _metaArb_io_in_3_bits_data_c_cat_T_25 | _metaArb_io_in_3_bits_data_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _metaArb_io_in_3_bits_data_c_cat_T_32 = _metaArb_io_in_3_bits_data_c_cat_T_28 | _metaArb_io_in_3_bits_data_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_33 = _metaArb_io_in_3_bits_data_c_cat_T_32 | _metaArb_io_in_3_bits_data_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_34 = _metaArb_io_in_3_bits_data_c_cat_T_33 | _metaArb_io_in_3_bits_data_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_40 = _metaArb_io_in_3_bits_data_c_cat_T_35 | _metaArb_io_in_3_bits_data_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_41 = _metaArb_io_in_3_bits_data_c_cat_T_40 | _metaArb_io_in_3_bits_data_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_42 = _metaArb_io_in_3_bits_data_c_cat_T_41 | _metaArb_io_in_3_bits_data_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_43 = _metaArb_io_in_3_bits_data_c_cat_T_42 | _metaArb_io_in_3_bits_data_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_44 = _metaArb_io_in_3_bits_data_c_cat_T_34 | _metaArb_io_in_3_bits_data_c_cat_T_43; // @[package.scala:81:59]
wire _metaArb_io_in_3_bits_data_c_cat_T_45 = _metaArb_io_in_3_bits_data_c_cat_T_27 | _metaArb_io_in_3_bits_data_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _metaArb_io_in_3_bits_data_c_cat_T_47 = _metaArb_io_in_3_bits_data_c_cat_T_45 | _metaArb_io_in_3_bits_data_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _metaArb_io_in_3_bits_data_c_cat_T_49 = _metaArb_io_in_3_bits_data_c_cat_T_47 | _metaArb_io_in_3_bits_data_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] metaArb_io_in_3_bits_data_c = {_metaArb_io_in_3_bits_data_c_cat_T_22, _metaArb_io_in_3_bits_data_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _metaArb_io_in_3_bits_data_T_1 = {metaArb_io_in_3_bits_data_c, nodeOut_d_bits_param}; // @[Metadata.scala:29:18, :84:18]
wire _metaArb_io_in_3_bits_data_T_10 = _metaArb_io_in_3_bits_data_T_1 == 4'h1; // @[Metadata.scala:84:{18,38}]
wire [1:0] _metaArb_io_in_3_bits_data_T_11 = {1'h0, _metaArb_io_in_3_bits_data_T_10}; // @[Metadata.scala:84:38]
wire _metaArb_io_in_3_bits_data_T_12 = _metaArb_io_in_3_bits_data_T_1 == 4'h0; // @[Metadata.scala:84:{18,38}]
wire [1:0] _metaArb_io_in_3_bits_data_T_13 = _metaArb_io_in_3_bits_data_T_12 ? 2'h2 : _metaArb_io_in_3_bits_data_T_11; // @[Metadata.scala:84:38]
wire _metaArb_io_in_3_bits_data_T_14 = _metaArb_io_in_3_bits_data_T_1 == 4'h4; // @[Metadata.scala:84:{18,38}]
wire [1:0] _metaArb_io_in_3_bits_data_T_15 = _metaArb_io_in_3_bits_data_T_14 ? 2'h2 : _metaArb_io_in_3_bits_data_T_13; // @[Metadata.scala:84:38]
wire _metaArb_io_in_3_bits_data_T_16 = _metaArb_io_in_3_bits_data_T_1 == 4'hC; // @[Metadata.scala:84:{18,38}]
wire [1:0] _metaArb_io_in_3_bits_data_T_17 = _metaArb_io_in_3_bits_data_T_16 ? 2'h3 : _metaArb_io_in_3_bits_data_T_15; // @[Metadata.scala:84:38]
wire [1:0] metaArb_io_in_3_bits_data_meta_state = _metaArb_io_in_3_bits_data_T_17; // @[Metadata.scala:84:38, :160:20]
wire [1:0] metaArb_io_in_3_bits_data_meta_1_coh_state = metaArb_io_in_3_bits_data_meta_state; // @[Metadata.scala:160:20]
wire [23:0] metaArb_io_in_3_bits_data_meta_1_tag; // @[HellaCache.scala:305:20]
assign metaArb_io_in_3_bits_data_meta_1_tag = _metaArb_io_in_3_bits_data_T[23:0]; // @[HellaCache.scala:305:20, :306:14]
assign _metaArb_io_in_3_bits_data_T_18 = {metaArb_io_in_3_bits_data_meta_1_coh_state, metaArb_io_in_3_bits_data_meta_1_tag}; // @[HellaCache.scala:305:20]
assign metaArb_io_in_3_bits_data = _metaArb_io_in_3_bits_data_T_18; // @[DCache.scala:135:28, :746:134]
reg blockUncachedGrant; // @[DCache.scala:750:33]
wire _T_92 = grantIsUncachedData & (blockUncachedGrant | s1_valid); // @[package.scala:16:47]
assign nodeOut_d_ready = ~(_T_92 | _T_90) & _nodeOut_d_ready_T_3; // @[DCache.scala:671:{18,24}, :722:{23,51}, :724:20, :752:{31,68}, :753:22]
assign io_cpu_req_ready_0 = _T_92 ? ~(nodeOut_d_valid | _T_10 | ~metaArb_io_in_7_ready | ~dataArb_io_in_3_ready) & _io_cpu_req_ready_T_4 : ~(_T_10 | ~metaArb_io_in_7_ready | ~dataArb_io_in_3_ready) & _io_cpu_req_ready_T_4; // @[DCache.scala:101:7, :135:28, :152:28, :195:9, :233:{20,73}, :258:{9,45,64}, :267:{34,53}, :275:{27,53,79,98}, :752:{31,68}, :755:29, :756:26]
wire _GEN_98 = _T_92 & nodeOut_d_valid; // @[DCache.scala:721:26, :752:{31,68}, :755:29, :757:32]
assign dataArb_io_in_1_valid = _GEN_98 | _dataArb_io_in_1_valid_T_1; // @[DCache.scala:152:28, :721:{26,61}, :752:68, :755:29, :757:32]
assign dataArb_io_in_1_bits_write = ~_T_92 | ~nodeOut_d_valid; // @[DCache.scala:152:28, :727:33, :752:{31,68}, :755:29, :758:37]
wire _blockUncachedGrant_T = ~dataArb_io_in_1_ready; // @[DCache.scala:152:28, :722:26, :759:31]
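  // Probe (B-channel) back-pressure: probes stall while the core needs forward progress
  // (pending-grant count or a valid LR/SC reservation), while a matching ReleaseAck is
  // outstanding, while a release or grant is in flight, or while s1/s2 hold valid requests;
  // accepting a probe also requires the metadata read port (metaArb input 6).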
wire _block_probe_for_core_progress_T = |blockProbeAfterGrantCount; // @[DCache.scala:668:42, :669:35, :766:65]
wire block_probe_for_core_progress = _block_probe_for_core_progress_T | lrscValid; // @[DCache.scala:473:29, :766:{65,71}]
wire [31:0] _block_probe_for_pending_release_ack_T = nodeOut_b_bits_address ^ release_ack_addr; // @[DCache.scala:227:29, :767:88]
wire [14:0] _block_probe_for_pending_release_ack_T_1 = _block_probe_for_pending_release_ack_T[20:6]; // @[DCache.scala:767:{88,107}]
wire _block_probe_for_pending_release_ack_T_2 = _block_probe_for_pending_release_ack_T_1 == 15'h0; // @[DCache.scala:582:29, :767:{107,163}]
wire block_probe_for_pending_release_ack = release_ack_wait & _block_probe_for_pending_release_ack_T_2; // @[DCache.scala:226:33, :767:{62,163}]
wire _block_probe_for_ordering_T = releaseInFlight | block_probe_for_pending_release_ack; // @[DCache.scala:334:46, :767:62, :768:50]
wire block_probe_for_ordering = _block_probe_for_ordering_T | grantInProgress; // @[DCache.scala:667:32, :768:{50,89}]
wire _metaArb_io_in_6_valid_T = ~block_probe_for_core_progress; // @[DCache.scala:766:71, :769:48]
wire _metaArb_io_in_6_valid_T_1 = _metaArb_io_in_6_valid_T | lrscBackingOff; // @[DCache.scala:474:40, :769:{48,79}]
wire _metaArb_io_in_6_valid_T_2 = nodeOut_b_valid & _metaArb_io_in_6_valid_T_1; // @[DCache.scala:769:{44,79}]
wire _nodeOut_b_ready_T = block_probe_for_core_progress | block_probe_for_ordering; // @[DCache.scala:766:71, :768:89, :770:79]
wire _nodeOut_b_ready_T_1 = _nodeOut_b_ready_T | s1_valid; // @[DCache.scala:182:25, :770:{79,107}]
wire _nodeOut_b_ready_T_2 = _nodeOut_b_ready_T_1 | s2_valid; // @[DCache.scala:331:25, :770:{107,119}]
wire _nodeOut_b_ready_T_3 = ~_nodeOut_b_ready_T_2; // @[DCache.scala:770:{47,119}]
assign _nodeOut_b_ready_T_4 = metaArb_io_in_6_ready & _nodeOut_b_ready_T_3; // @[DCache.scala:135:28, :770:{44,47}]
assign nodeOut_b_ready = _nodeOut_b_ready_T_4; // @[DCache.scala:770:44]
wire [1:0] _metaArb_io_in_6_bits_idx_T = nodeOut_b_bits_address[7:6]; // @[DCache.scala:1200:47]
wire [7:0] _metaArb_io_in_6_bits_addr_T = io_cpu_req_bits_addr_0[39:32]; // @[DCache.scala:101:7, :773:58]
wire [7:0] _metaArb_io_in_6_bits_addr_T_2 = io_cpu_req_bits_addr_0[39:32]; // @[DCache.scala:101:7, :773:58, :844:62]
wire [39:0] _metaArb_io_in_6_bits_addr_T_1 = {_metaArb_io_in_6_bits_addr_T, nodeOut_b_bits_address}; // @[DCache.scala:773:{36,58}]
assign _s1_victim_way_T = lfsr[1:0]; // @[PRNG.scala:95:17]
assign s1_victim_way = _s1_victim_way_T; // @[package.scala:163:13]
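  // C-channel beat counter (Edges.scala), mirroring the D-channel counter above: c_first / c_last
  // and releaseDone mark the boundaries of outgoing release and probe-ack messages.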
wire _T_132 = nodeOut_c_ready & nodeOut_c_valid; // @[Decoupled.scala:51:35]
wire _releaseRejected_T; // @[Decoupled.scala:51:35]
assign _releaseRejected_T = _T_132; // @[Decoupled.scala:51:35]
wire _io_cpu_perf_release_T; // @[Decoupled.scala:51:35]
assign _io_cpu_perf_release_T = _T_132; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_99 = 27'hFFF << nodeOut_c_bits_size; // @[package.scala:243:71]
wire [26:0] _r_beats1_decode_T_3; // @[package.scala:243:71]
assign _r_beats1_decode_T_3 = _GEN_99; // @[package.scala:243:71]
wire [26:0] _io_cpu_perf_release_beats1_decode_T; // @[package.scala:243:71]
assign _io_cpu_perf_release_beats1_decode_T = _GEN_99; // @[package.scala:243:71]
wire [11:0] _r_beats1_decode_T_4 = _r_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _r_beats1_decode_T_5 = ~_r_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] r_beats1_decode_1 = _r_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire r_beats1_opdata_1 = nodeOut_c_bits_opcode[0]; // @[Edges.scala:102:36]
wire io_cpu_perf_release_beats1_opdata = nodeOut_c_bits_opcode[0]; // @[Edges.scala:102:36]
wire [8:0] r_beats1_1 = r_beats1_opdata_1 ? r_beats1_decode_1 : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] r_counter_1; // @[Edges.scala:229:27]
wire [9:0] _r_counter1_T_1 = {1'h0, r_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] r_counter1_1 = _r_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire c_first = r_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T_2 = r_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_3 = r_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_last = _r_last_T_2 | _r_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire releaseDone = c_last & _T_132; // @[Decoupled.scala:51:35]
wire [8:0] _r_count_T_1 = ~r_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_count = r_beats1_1 & _r_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _r_counter_T_1 = c_first ? r_beats1_1 : r_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
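  // Writeback data pipeline: a release data beat is read from the data array (dataArb input 2),
  // flows through s1/s2, and is replayed if tl_out.c is not ready (releaseRejected);
  // releaseDataBeat counts beats issued so far, including the two possibly in flight.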
wire _releaseRejected_T_2; // @[DCache.scala:803:44]
wire releaseRejected; // @[DCache.scala:800:29]
wire _s1_release_data_valid_T = dataArb_io_in_2_ready & _dataArb_io_in_2_valid_T_1; // @[Decoupled.scala:51:35]
reg s1_release_data_valid; // @[DCache.scala:801:38]
wire _s2_release_data_valid_T = ~releaseRejected; // @[DCache.scala:800:29, :802:64]
wire _s2_release_data_valid_T_1 = s1_release_data_valid & _s2_release_data_valid_T; // @[DCache.scala:801:38, :802:{61,64}]
reg s2_release_data_valid; // @[DCache.scala:802:38]
wire _nodeOut_c_valid_T_3 = s2_release_data_valid; // @[DCache.scala:802:38, :810:44]
wire _releaseRejected_T_1 = ~_releaseRejected_T; // @[Decoupled.scala:51:35]
assign _releaseRejected_T_2 = s2_release_data_valid & _releaseRejected_T_1; // @[DCache.scala:802:38, :803:{44,47}]
assign releaseRejected = _releaseRejected_T_2; // @[DCache.scala:800:29, :803:44]
wire [9:0] _releaseDataBeat_T = {1'h0, c_count}; // @[Edges.scala:234:25]
wire [1:0] _releaseDataBeat_T_1 = {1'h0, s2_release_data_valid}; // @[DCache.scala:802:38, :804:98]
wire [2:0] _releaseDataBeat_T_2 = {2'h0, s1_release_data_valid} + {1'h0, _releaseDataBeat_T_1}; // @[DCache.scala:801:38, :804:{93,98}]
wire [1:0] _releaseDataBeat_T_3 = _releaseDataBeat_T_2[1:0]; // @[DCache.scala:804:93]
wire [1:0] _releaseDataBeat_T_4 = releaseRejected ? 2'h0 : _releaseDataBeat_T_3; // @[DCache.scala:800:29, :804:{48,93}]
wire [10:0] _releaseDataBeat_T_5 = {1'h0, _releaseDataBeat_T} + {9'h0, _releaseDataBeat_T_4}; // @[DCache.scala:804:{28,43,48}]
wire [9:0] releaseDataBeat = _releaseDataBeat_T_5[9:0]; // @[DCache.scala:804:43]
wire _nodeOut_c_valid_T_4 = c_first & release_ack_wait; // @[Edges.scala:231:25]
wire _nodeOut_c_valid_T_5 = ~_nodeOut_c_valid_T_4; // @[DCache.scala:810:{120,130}]
wire _nodeOut_c_valid_T_6 = _nodeOut_c_valid_T_3 & _nodeOut_c_valid_T_5; // @[DCache.scala:810:{44,117,120}]
wire [1:0] newCoh_state; // @[DCache.scala:812:27]
wire [1:0] metaArb_io_in_4_bits_data_meta_coh_state = newCoh_state; // @[HellaCache.scala:305:20]
wire _release_state_T_8 = s2_valid_flush_line | s2_flush_valid; // @[DCache.scala:363:51, :419:75, :817:34, :820:151]
wire _discard_line_T = s2_req_size[1]; // @[DCache.scala:339:19, :818:60]
wire _discard_line_T_1 = s2_valid_flush_line & _discard_line_T; // @[DCache.scala:419:75, :818:{46,60}]
wire _discard_line_T_3 = s2_flush_valid & _discard_line_T_2; // @[DCache.scala:363:51, :818:{82,102}]
wire discard_line = _discard_line_T_1 | _discard_line_T_3; // @[DCache.scala:818:{46,64,82}]
wire _release_state_T = ~discard_line; // @[DCache.scala:818:64, :819:47]
wire _release_state_T_1 = s2_victim_dirty & _release_state_T; // @[Misc.scala:38:9]
wire _release_state_T_3 = ~release_ack_wait; // @[DCache.scala:226:33, :607:47, :820:57]
wire _release_state_T_6 = |s2_victim_state_state; // @[Metadata.scala:50:45]
wire _release_state_T_9 = ~s2_hit_valid; // @[Metadata.scala:50:45]
wire _release_state_T_10 = s2_readwrite & _release_state_T_9; // @[DCache.scala:354:30, :820:{185,188}]
wire _release_state_T_11 = _release_state_T_8 | _release_state_T_10; // @[DCache.scala:820:{151,169,185}]
wire [3:0] _release_state_T_14 = _release_state_T_1 ? 4'h1 : 4'h6; // @[DCache.scala:819:{27,44}]
wire [1:0] _probe_bits_T_1 = s2_req_addr[7:6]; // @[DCache.scala:339:19, :822:76]
wire [25:0] _probe_bits_T_2 = {s2_victim_tag, _probe_bits_T_1}; // @[DCache.scala:433:26, :822:{49,76}]
wire [31:0] _probe_bits_T_3 = {_probe_bits_T_2, 6'h0}; // @[DCache.scala:822:{49,96}]
wire [31:0] probe_bits_res_address = _probe_bits_T_3; // @[DCache.scala:822:96, :1202:19]
wire probeNack; // @[DCache.scala:825:34]
wire [3:0] _release_state_T_15 = {1'h0, releaseDone, 2'h3}; // @[Edges.scala:233:22]
wire _probeNack_T = ~releaseDone; // @[Edges.scala:233:22]
assign probeNack = s2_prb_ack_data | (|s2_probe_state_state) | _probeNack_T; // @[Misc.scala:38:9]
wire [3:0] _release_state_T_16 = releaseDone ? 4'h0 : 4'h5; // @[Edges.scala:233:22]
assign s1_nack = s2_probe ? probeNack | _T_60 | _T_40 | _T_14 : _T_60 | _T_40 | _T_14; // @[DCache.scala:185:28, :276:{39,58,79}, :288:{75,85}, :333:25, :446:{24,82,92}, :571:{18,36,46}, :824:21, :825:34, :839:{24,34}]
wire _T_102 = release_state == 4'h4; // @[DCache.scala:228:30, :841:25]
assign metaArb_io_in_6_valid = _T_102 | _metaArb_io_in_6_valid_T_2; // @[DCache.scala:135:28, :769:{26,44}, :841:{25,44}, :842:30]
assign metaArb_io_in_6_bits_idx = _T_102 ? _metaArb_io_in_6_bits_idx_T_1 : _metaArb_io_in_6_bits_idx_T; // @[DCache.scala:135:28, :772:29, :841:{25,44}, :843:33, :1200:47]
wire [39:0] _metaArb_io_in_6_bits_addr_T_3 = {_metaArb_io_in_6_bits_addr_T_2, probe_bits_address}; // @[DCache.scala:184:29, :844:{40,62}]
assign metaArb_io_in_6_bits_addr = _T_102 ? _metaArb_io_in_6_bits_addr_T_3 : _metaArb_io_in_6_bits_addr_T_1; // @[DCache.scala:135:28, :773:{30,36}, :841:{25,44}, :844:{34,40}]
wire _T_103 = release_state == 4'h5; // @[DCache.scala:228:30, :850:25]
wire _T_104 = release_state == 4'h3; // @[DCache.scala:228:30, :854:25]
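  // C-channel message select: voluntary writeback states send ReleaseData (opcode 7) or Release
  // (6) when no data is required; probe responses send ProbeAckData (5) or ProbeAck (4).
  // newCoh / releaseWay pick the corresponding coherence downgrade and victim/probed way.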
assign nodeOut_c_valid = _T_104 | _T_103 | s2_probe & ~s2_prb_ack_data | _nodeOut_c_valid_T_6; // @[Misc.scala:38:9]
wire _GEN_100 = _T_104 | ~(~s2_probe | s2_prb_ack_data | ~(|s2_probe_state_state)); // @[Misc.scala:38:9]
wire _T_110 = _T_106 | _T_107 | _T_111; // @[package.scala:16:47, :81:59]
assign nodeOut_c_bits_opcode = _T_110 ? {2'h3, ~_T_111} : {2'h2, _inWriteback_T_1}; // @[package.scala:16:47, :81:59]
assign nodeOut_c_bits_param = _T_110 ? (_T_111 ? nodeOut_c_bits_c_param : nodeOut_c_bits_c_1_param) : _inWriteback_T_1 ? dirtyReleaseMessage_param : _GEN_100 ? cleanReleaseMessage_param : 3'h5; // @[package.scala:16:47, :81:59]
assign nodeOut_c_bits_size = _T_110 ? 4'h6 : _inWriteback_T_1 ? dirtyReleaseMessage_size : _GEN_100 ? cleanReleaseMessage_size : nackResponseMessage_size; // @[package.scala:16:47, :81:59]
assign newCoh_state = _T_110 ? voluntaryNewCoh_state : probeNewCoh_state; // @[package.scala:81:59]
assign releaseWay = _T_110 ? s2_victim_or_hit_way : s2_probe_way; // @[package.scala:81:59]
wire _dataArb_io_in_2_valid_T = releaseDataBeat < 10'h8; // @[DCache.scala:804:43, :900:60]
assign _dataArb_io_in_2_valid_T_1 = inWriteback & _dataArb_io_in_2_valid_T; // @[package.scala:81:59]
assign dataArb_io_in_2_valid = _dataArb_io_in_2_valid_T_1; // @[DCache.scala:152:28, :900:41]
wire [7:0] _dataArb_io_in_2_bits_addr_T_1 = {_dataArb_io_in_2_bits_addr_T, 6'h0}; // @[DCache.scala:903:55, :1200:47]
wire [2:0] _dataArb_io_in_2_bits_addr_T_2 = releaseDataBeat[2:0]; // @[DCache.scala:804:43, :903:90]
wire [5:0] _dataArb_io_in_2_bits_addr_T_3 = {_dataArb_io_in_2_bits_addr_T_2, 3'h0}; // @[DCache.scala:903:{90,117}]
assign _dataArb_io_in_2_bits_addr_T_4 = {_dataArb_io_in_2_bits_addr_T_1[7:6], _dataArb_io_in_2_bits_addr_T_1[5:0] | _dataArb_io_in_2_bits_addr_T_3}; // @[DCache.scala:903:{55,72,117}]
assign dataArb_io_in_2_bits_addr = _dataArb_io_in_2_bits_addr_T_4; // @[DCache.scala:152:28, :903:72]
wire _metaArb_io_in_4_valid_T_1 = release_state == 4'h7; // @[package.scala:16:47]
assign _metaArb_io_in_4_valid_T_2 = _metaArb_io_in_4_valid_T | _metaArb_io_in_4_valid_T_1; // @[package.scala:16:47, :81:59]
assign metaArb_io_in_4_valid = _metaArb_io_in_4_valid_T_2; // @[package.scala:81:59]
assign metaArb_io_in_4_bits_idx = _metaArb_io_in_4_bits_idx_T; // @[DCache.scala:135:28, :1200:47]
wire [7:0] _metaArb_io_in_4_bits_addr_T_1 = probe_bits_address[7:0]; // @[DCache.scala:184:29, :912:90]
assign _metaArb_io_in_4_bits_addr_T_2 = {_metaArb_io_in_4_bits_addr_T, _metaArb_io_in_4_bits_addr_T_1}; // @[DCache.scala:912:{36,58,90}]
assign metaArb_io_in_4_bits_addr = _metaArb_io_in_4_bits_addr_T_2; // @[DCache.scala:135:28, :912:36]
wire [23:0] _metaArb_io_in_4_bits_data_T = nodeOut_c_bits_address[31:8]; // @[DCache.scala:913:78]
wire [23:0] metaArb_io_in_4_bits_data_meta_tag = _metaArb_io_in_4_bits_data_T; // @[HellaCache.scala:305:20]
assign _metaArb_io_in_4_bits_data_T_1 = {metaArb_io_in_4_bits_data_meta_coh_state, metaArb_io_in_4_bits_data_meta_tag}; // @[HellaCache.scala:305:20]
assign metaArb_io_in_4_bits_data = _metaArb_io_in_4_bits_data_T_1; // @[DCache.scala:135:28, :913:97]
assign metaArb_io_in_5_bits_data = _metaArb_io_in_4_bits_data_T_1; // @[DCache.scala:135:28, :913:97]
assign metaArb_io_in_6_bits_data = _metaArb_io_in_4_bits_data_T_1; // @[DCache.scala:135:28, :913:97]
assign metaArb_io_in_7_bits_data = _metaArb_io_in_4_bits_data_T_1; // @[DCache.scala:135:28, :913:97]
wire _io_cpu_s2_uncached_T = ~s2_hit; // @[Misc.scala:35:9]
assign _io_cpu_s2_uncached_T_1 = s2_uncached & _io_cpu_s2_uncached_T; // @[DCache.scala:424:39, :920:{37,40}]
assign io_cpu_s2_uncached_0 = _io_cpu_s2_uncached_T_1; // @[DCache.scala:101:7, :920:37]
wire _io_cpu_ordered_T = ~s1_req_no_xcpt; // @[DCache.scala:196:25, :929:35]
wire _io_cpu_ordered_T_1 = s1_valid & _io_cpu_ordered_T; // @[DCache.scala:182:25, :929:{32,35}]
wire _io_cpu_ordered_T_2 = ~s2_req_no_xcpt; // @[DCache.scala:339:19, :929:72]
wire _io_cpu_ordered_T_3 = s2_valid & _io_cpu_ordered_T_2; // @[DCache.scala:331:25, :929:{69,72}]
wire _io_cpu_ordered_T_4 = _io_cpu_ordered_T_1 | _io_cpu_ordered_T_3; // @[DCache.scala:929:{32,57,69}]
wire _io_cpu_ordered_T_5 = _io_cpu_ordered_T_4 | cached_grant_wait; // @[DCache.scala:223:34, :929:{57,94}]
wire _io_cpu_ordered_T_7 = _io_cpu_ordered_T_5 | _io_cpu_ordered_T_6; // @[DCache.scala:929:{94,115,142}]
assign _io_cpu_ordered_T_8 = ~_io_cpu_ordered_T_7; // @[DCache.scala:929:{21,115}]
assign io_cpu_ordered_0 = _io_cpu_ordered_T_8; // @[DCache.scala:101:7, :929:21]
wire _io_cpu_store_pending_T_2 = _io_cpu_store_pending_T | _io_cpu_store_pending_T_1; // @[Consts.scala:90:{32,42,49}]
wire _io_cpu_store_pending_T_4 = _io_cpu_store_pending_T_2 | _io_cpu_store_pending_T_3; // @[Consts.scala:90:{42,59,66}]
wire _io_cpu_store_pending_T_9 = _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_10 = _io_cpu_store_pending_T_9 | _io_cpu_store_pending_T_7; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_11 = _io_cpu_store_pending_T_10 | _io_cpu_store_pending_T_8; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_17 = _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_18 = _io_cpu_store_pending_T_17 | _io_cpu_store_pending_T_14; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_19 = _io_cpu_store_pending_T_18 | _io_cpu_store_pending_T_15; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_20 = _io_cpu_store_pending_T_19 | _io_cpu_store_pending_T_16; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T_21 = _io_cpu_store_pending_T_11 | _io_cpu_store_pending_T_20; // @[package.scala:81:59]
wire _io_cpu_store_pending_T_22 = _io_cpu_store_pending_T_4 | _io_cpu_store_pending_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _io_cpu_store_pending_T_23 = cached_grant_wait & _io_cpu_store_pending_T_22; // @[DCache.scala:223:34, :930:46]
assign _io_cpu_store_pending_T_25 = _io_cpu_store_pending_T_23 | _io_cpu_store_pending_T_24; // @[DCache.scala:930:{46,70,97}]
assign io_cpu_store_pending_0 = _io_cpu_store_pending_T_25; // @[DCache.scala:101:7, :930:70]
wire _s1_xcpt_valid_T = ~s1_req_no_xcpt; // @[DCache.scala:196:25, :929:35, :932:43]
wire _s1_xcpt_valid_T_1 = _tlb_io_req_valid_T_3 & _s1_xcpt_valid_T; // @[DCache.scala:273:40, :932:{40,43}]
wire _s1_xcpt_valid_T_2 = ~s1_nack; // @[DCache.scala:185:28, :187:41, :932:68]
wire s1_xcpt_valid = _s1_xcpt_valid_T_1 & _s1_xcpt_valid_T_2; // @[DCache.scala:932:{40,65,68}]
reg io_cpu_s2_xcpt_REG; // @[DCache.scala:933:32]
wire _io_cpu_s2_xcpt_T_miss = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_miss; // @[DCache.scala:342:24, :933:{24,32}]
wire [31:0] _io_cpu_s2_xcpt_T_paddr = io_cpu_s2_xcpt_REG ? s2_tlb_xcpt_paddr : 32'h0; // @[DCache.scala:342:24, :933:{24,32}]
wire [39:0] _io_cpu_s2_xcpt_T_gpa = io_cpu_s2_xcpt_REG ? s2_tlb_xcpt_gpa : 40'h0; // @[DCache.scala:342:24, :933:{24,32}]
assign _io_cpu_s2_xcpt_T_pf_ld = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_ld; // @[DCache.scala:342:24, :933:{24,32}]
assign _io_cpu_s2_xcpt_T_pf_st = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_st; // @[DCache.scala:342:24, :933:{24,32}]
wire _io_cpu_s2_xcpt_T_pf_inst = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_inst; // @[DCache.scala:342:24, :933:{24,32}]
assign _io_cpu_s2_xcpt_T_ae_ld = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_ld; // @[DCache.scala:342:24, :933:{24,32}]
assign _io_cpu_s2_xcpt_T_ae_st = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_st; // @[DCache.scala:342:24, :933:{24,32}]
wire _io_cpu_s2_xcpt_T_ae_inst = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_inst; // @[DCache.scala:342:24, :933:{24,32}]
assign _io_cpu_s2_xcpt_T_ma_ld = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ma_ld; // @[DCache.scala:342:24, :933:{24,32}]
assign _io_cpu_s2_xcpt_T_ma_st = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ma_st; // @[DCache.scala:342:24, :933:{24,32}]
wire _io_cpu_s2_xcpt_T_cacheable = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_cacheable; // @[DCache.scala:342:24, :933:{24,32}]
wire _io_cpu_s2_xcpt_T_must_alloc = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_must_alloc; // @[DCache.scala:342:24, :933:{24,32}]
wire _io_cpu_s2_xcpt_T_prefetchable = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_prefetchable; // @[DCache.scala:342:24, :933:{24,32}]
wire [1:0] _io_cpu_s2_xcpt_T_size = io_cpu_s2_xcpt_REG ? s2_tlb_xcpt_size : 2'h0; // @[DCache.scala:342:24, :933:{24,32}]
wire [4:0] _io_cpu_s2_xcpt_T_cmd = io_cpu_s2_xcpt_REG ? s2_tlb_xcpt_cmd : 5'h0; // @[DCache.scala:342:24, :933:{24,32}]
assign io_cpu_s2_xcpt_pf_ld_0 = _io_cpu_s2_xcpt_T_pf_ld; // @[DCache.scala:101:7, :933:24]
assign io_cpu_s2_xcpt_pf_st_0 = _io_cpu_s2_xcpt_T_pf_st; // @[DCache.scala:101:7, :933:24]
assign io_cpu_s2_xcpt_ae_ld_0 = _io_cpu_s2_xcpt_T_ae_ld; // @[DCache.scala:101:7, :933:24]
assign io_cpu_s2_xcpt_ae_st_0 = _io_cpu_s2_xcpt_T_ae_st; // @[DCache.scala:101:7, :933:24]
assign io_cpu_s2_xcpt_ma_ld_0 = _io_cpu_s2_xcpt_T_ma_ld; // @[DCache.scala:101:7, :933:24]
assign io_cpu_s2_xcpt_ma_st_0 = _io_cpu_s2_xcpt_T_ma_st; // @[DCache.scala:101:7, :933:24]
reg [63:0] s2_uncached_data_word; // @[DCache.scala:947:40]
reg doUncachedResp; // @[DCache.scala:948:31]
assign io_cpu_resp_bits_replay_0 = doUncachedResp; // @[DCache.scala:101:7, :948:31]
wire _io_cpu_resp_valid_T = s2_valid_hit_pre_data_ecc | doUncachedResp; // @[DCache.scala:420:69, :948:31, :949:51]
assign _io_cpu_resp_valid_T_2 = _io_cpu_resp_valid_T; // @[DCache.scala:949:{51,70}]
assign io_cpu_resp_valid_0 = _io_cpu_resp_valid_T_2; // @[DCache.scala:101:7, :949:70]
wire _io_cpu_replay_next_T_1 = _io_cpu_replay_next_T & grantIsUncachedData; // @[Decoupled.scala:51:35]
assign _io_cpu_replay_next_T_3 = _io_cpu_replay_next_T_1; // @[DCache.scala:950:{39,62}]
assign io_cpu_replay_next_0 = _io_cpu_replay_next_T_3; // @[DCache.scala:101:7, :950:62]
assign io_cpu_resp_bits_addr_0 = doUncachedResp ? s2_uncached_resp_addr : s2_req_addr; // @[DCache.scala:101:7, :339:19, :344:34, :917:37, :948:31, :951:25, :954:27]
assign io_cpu_resp_bits_data_raw_0 = s2_data_word; // @[DCache.scala:101:7, :970:80]
wire [63:0] s2_data_word_possibly_uncached = s2_data_word; // @[DCache.scala:970:80, :972:120]
wire [31:0] _io_cpu_resp_bits_data_shifted_T_1 = s2_data_word_possibly_uncached[63:32]; // @[DCache.scala:972:120]
wire [31:0] _io_cpu_resp_bits_data_T_5 = s2_data_word_possibly_uncached[63:32]; // @[DCache.scala:972:120]
wire [31:0] _io_cpu_resp_bits_data_word_bypass_shifted_T_1 = s2_data_word_possibly_uncached[63:32]; // @[DCache.scala:972:120]
wire [31:0] _io_cpu_resp_bits_data_word_bypass_T_5 = s2_data_word_possibly_uncached[63:32]; // @[DCache.scala:972:120]
wire [31:0] _io_cpu_resp_bits_data_shifted_T_2 = s2_data_word_possibly_uncached[31:0]; // @[DCache.scala:972:120]
wire [31:0] _io_cpu_resp_bits_data_word_bypass_shifted_T_2 = s2_data_word_possibly_uncached[31:0]; // @[DCache.scala:972:120]
wire [31:0] io_cpu_resp_bits_data_shifted = _io_cpu_resp_bits_data_shifted_T ? _io_cpu_resp_bits_data_shifted_T_1 : _io_cpu_resp_bits_data_shifted_T_2; // @[AMOALU.scala:42:{24,29,37,55}]
wire [31:0] io_cpu_resp_bits_data_zeroed = io_cpu_resp_bits_data_shifted; // @[AMOALU.scala:42:24, :44:23]
wire _GEN_101 = size == 2'h2; // @[AMOALU.scala:11:18, :45:26]
wire _io_cpu_resp_bits_data_T; // @[AMOALU.scala:45:26]
assign _io_cpu_resp_bits_data_T = _GEN_101; // @[AMOALU.scala:45:26]
wire _io_cpu_resp_bits_data_word_bypass_T; // @[AMOALU.scala:45:26]
assign _io_cpu_resp_bits_data_word_bypass_T = _GEN_101; // @[AMOALU.scala:45:26]
wire _io_cpu_resp_bits_data_T_1 = _io_cpu_resp_bits_data_T; // @[AMOALU.scala:45:{26,34}]
wire _io_cpu_resp_bits_data_T_2 = io_cpu_resp_bits_data_zeroed[31]; // @[AMOALU.scala:44:23, :45:81]
wire _io_cpu_resp_bits_data_T_3 = s2_req_signed & _io_cpu_resp_bits_data_T_2; // @[DCache.scala:339:19]
wire [31:0] _io_cpu_resp_bits_data_T_4 = {32{_io_cpu_resp_bits_data_T_3}}; // @[AMOALU.scala:45:{49,72}]
wire [31:0] _io_cpu_resp_bits_data_T_6 = _io_cpu_resp_bits_data_T_1 ? _io_cpu_resp_bits_data_T_4 : _io_cpu_resp_bits_data_T_5; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_cpu_resp_bits_data_T_7 = {_io_cpu_resp_bits_data_T_6, io_cpu_resp_bits_data_zeroed}; // @[AMOALU.scala:44:23, :45:{16,20}]
wire [15:0] _io_cpu_resp_bits_data_shifted_T_4 = _io_cpu_resp_bits_data_T_7[31:16]; // @[AMOALU.scala:42:37, :45:16]
wire [15:0] _io_cpu_resp_bits_data_shifted_T_5 = _io_cpu_resp_bits_data_T_7[15:0]; // @[AMOALU.scala:42:55, :45:16]
wire [15:0] io_cpu_resp_bits_data_shifted_1 = _io_cpu_resp_bits_data_shifted_T_3 ? _io_cpu_resp_bits_data_shifted_T_4 : _io_cpu_resp_bits_data_shifted_T_5; // @[AMOALU.scala:42:{24,29,37,55}]
wire [15:0] io_cpu_resp_bits_data_zeroed_1 = io_cpu_resp_bits_data_shifted_1; // @[AMOALU.scala:42:24, :44:23]
wire _io_cpu_resp_bits_data_T_8 = size == 2'h1; // @[AMOALU.scala:11:18, :45:26]
wire _io_cpu_resp_bits_data_T_9 = _io_cpu_resp_bits_data_T_8; // @[AMOALU.scala:45:{26,34}]
wire _io_cpu_resp_bits_data_T_10 = io_cpu_resp_bits_data_zeroed_1[15]; // @[AMOALU.scala:44:23, :45:81]
wire _io_cpu_resp_bits_data_T_11 = s2_req_signed & _io_cpu_resp_bits_data_T_10; // @[DCache.scala:339:19]
wire [47:0] _io_cpu_resp_bits_data_T_12 = {48{_io_cpu_resp_bits_data_T_11}}; // @[AMOALU.scala:45:{49,72}]
wire [47:0] _io_cpu_resp_bits_data_T_13 = _io_cpu_resp_bits_data_T_7[63:16]; // @[AMOALU.scala:45:{16,94}]
wire [47:0] _io_cpu_resp_bits_data_T_14 = _io_cpu_resp_bits_data_T_9 ? _io_cpu_resp_bits_data_T_12 : _io_cpu_resp_bits_data_T_13; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_cpu_resp_bits_data_T_15 = {_io_cpu_resp_bits_data_T_14, io_cpu_resp_bits_data_zeroed_1}; // @[AMOALU.scala:44:23, :45:{16,20}]
wire [7:0] _io_cpu_resp_bits_data_shifted_T_7 = _io_cpu_resp_bits_data_T_15[15:8]; // @[AMOALU.scala:42:37, :45:16]
wire [7:0] _io_cpu_resp_bits_data_shifted_T_8 = _io_cpu_resp_bits_data_T_15[7:0]; // @[AMOALU.scala:42:55, :45:16]
wire [7:0] io_cpu_resp_bits_data_shifted_2 = _io_cpu_resp_bits_data_shifted_T_6 ? _io_cpu_resp_bits_data_shifted_T_7 : _io_cpu_resp_bits_data_shifted_T_8; // @[AMOALU.scala:42:{24,29,37,55}]
wire [7:0] io_cpu_resp_bits_data_zeroed_2 = io_cpu_resp_bits_data_shifted_2; // @[AMOALU.scala:42:24, :44:23]
wire _io_cpu_resp_bits_data_T_16 = size == 2'h0; // @[AMOALU.scala:11:18, :45:26]
wire _io_cpu_resp_bits_data_T_17 = _io_cpu_resp_bits_data_T_16; // @[AMOALU.scala:45:{26,34}]
wire _io_cpu_resp_bits_data_T_18 = io_cpu_resp_bits_data_zeroed_2[7]; // @[AMOALU.scala:44:23, :45:81]
wire _io_cpu_resp_bits_data_T_19 = s2_req_signed & _io_cpu_resp_bits_data_T_18; // @[DCache.scala:339:19]
wire [55:0] _io_cpu_resp_bits_data_T_20 = {56{_io_cpu_resp_bits_data_T_19}}; // @[AMOALU.scala:45:{49,72}]
wire [55:0] _io_cpu_resp_bits_data_T_21 = _io_cpu_resp_bits_data_T_15[63:8]; // @[AMOALU.scala:45:{16,94}]
wire [55:0] _io_cpu_resp_bits_data_T_22 = _io_cpu_resp_bits_data_T_17 ? _io_cpu_resp_bits_data_T_20 : _io_cpu_resp_bits_data_T_21; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_cpu_resp_bits_data_T_23 = {_io_cpu_resp_bits_data_T_22, io_cpu_resp_bits_data_zeroed_2}; // @[AMOALU.scala:44:23, :45:{16,20}]
assign _io_cpu_resp_bits_data_T_24 = _io_cpu_resp_bits_data_T_23; // @[DCache.scala:974:41]
assign io_cpu_resp_bits_data_0 = _io_cpu_resp_bits_data_T_24; // @[DCache.scala:101:7, :974:41]
wire [31:0] io_cpu_resp_bits_data_word_bypass_shifted = _io_cpu_resp_bits_data_word_bypass_shifted_T ? _io_cpu_resp_bits_data_word_bypass_shifted_T_1 : _io_cpu_resp_bits_data_word_bypass_shifted_T_2; // @[AMOALU.scala:42:{24,29,37,55}]
wire [31:0] io_cpu_resp_bits_data_word_bypass_zeroed = io_cpu_resp_bits_data_word_bypass_shifted; // @[AMOALU.scala:42:24, :44:23]
wire _io_cpu_resp_bits_data_word_bypass_T_1 = _io_cpu_resp_bits_data_word_bypass_T; // @[AMOALU.scala:45:{26,34}]
wire _io_cpu_resp_bits_data_word_bypass_T_2 = io_cpu_resp_bits_data_word_bypass_zeroed[31]; // @[AMOALU.scala:44:23, :45:81]
wire _io_cpu_resp_bits_data_word_bypass_T_3 = s2_req_signed & _io_cpu_resp_bits_data_word_bypass_T_2; // @[DCache.scala:339:19]
wire [31:0] _io_cpu_resp_bits_data_word_bypass_T_4 = {32{_io_cpu_resp_bits_data_word_bypass_T_3}}; // @[AMOALU.scala:45:{49,72}]
wire [31:0] _io_cpu_resp_bits_data_word_bypass_T_6 = _io_cpu_resp_bits_data_word_bypass_T_1 ? _io_cpu_resp_bits_data_word_bypass_T_4 : _io_cpu_resp_bits_data_word_bypass_T_5; // @[AMOALU.scala:45:{20,34,49,94}]
assign _io_cpu_resp_bits_data_word_bypass_T_7 = {_io_cpu_resp_bits_data_word_bypass_T_6, io_cpu_resp_bits_data_word_bypass_zeroed}; // @[AMOALU.scala:44:23, :45:{16,20}]
assign io_cpu_resp_bits_data_word_bypass_0 = _io_cpu_resp_bits_data_word_bypass_T_7; // @[DCache.scala:101:7] |
Generate the Verilog code corresponding to the following Chisel files.
File AccumulatorMem.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
class AccumulatorReadReq[T <: Data: Arithmetic, U <: Data](n: Int, acc_t: T, scale_t: U) extends Bundle {
val addr = UInt(log2Ceil(n).W)
val scale = scale_t
val igelu_qb = acc_t.cloneType
val igelu_qc = acc_t.cloneType
val iexp_qln2 = acc_t.cloneType
val iexp_qln2_inv = acc_t.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val full = Bool() // Whether or not we return the full bitwidth output
val fromDMA = Bool()
}
class AccumulatorReadResp[T <: Data: Arithmetic, U <: Data](fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val data = fullDataType.cloneType
val fromDMA = Bool()
val scale = scale_t.cloneType
val igelu_qb = fullDataType.head.head.cloneType
val igelu_qc = fullDataType.head.head.cloneType
val iexp_qln2 = fullDataType.head.head.cloneType
val iexp_qln2_inv = fullDataType.head.head.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val acc_bank_id = UInt(2.W) // TODO magic number
}
class AccumulatorReadIO[T <: Data: Arithmetic, U <: Data](n: Int, fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val req = Decoupled(new AccumulatorReadReq[T, U](n, fullDataType.head.head.cloneType, scale_t))
val resp = Flipped(Decoupled(new AccumulatorReadResp[T, U](fullDataType, scale_t)))
}
class AccumulatorWriteReq[T <: Data: Arithmetic](n: Int, t: Vec[Vec[T]]) extends Bundle {
val addr = UInt(log2Up(n).W)
val data = t.cloneType
val acc = Bool()
val mask = Vec(t.getWidth / 8, Bool()) // TODO Use aligned_to here
}
class AccumulatorMemIO [T <: Data: Arithmetic, U <: Data](n: Int, t: Vec[Vec[T]], scale_t: U,
acc_sub_banks: Int, use_shared_ext_mem: Boolean
) extends Bundle {
val read = Flipped(new AccumulatorReadIO(n, t, scale_t))
val write = Flipped(Decoupled(new AccumulatorWriteReq(n, t)))
val ext_mem = if (use_shared_ext_mem) Some(Vec(acc_sub_banks, new ExtMemIO)) else None
val adder = new Bundle {
val valid = Output(Bool())
val op1 = Output(t.cloneType)
val op2 = Output(t.cloneType)
val sum = Input(t.cloneType)
}
}
class AccPipe[T <: Data : Arithmetic](latency: Int, t: T)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val op1 = Input(t.cloneType)
val op2 = Input(t.cloneType)
val sum = Output(t.cloneType)
})
import ev._
io.sum := ShiftRegister(io.op1 + io.op2, latency)
}
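// Editor's note: AccPipe just registers op1 + op2 for `latency` cycles. AccumulatorMem
// below delays every accumulate-write through its `pipelined_writes` shift register for a
// matching number of cycles, so the sum has emerged from this pipeline by the time the
// oldest pending write commits to the SRAM (the exact pairing of the two latencies is the
// responsibility of whatever instantiates both, e.g. the Scratchpad).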
class AccPipeShared[T <: Data : Arithmetic](latency: Int, t: Vec[Vec[T]], banks: Int) extends Module {
val io = IO(new Bundle {
val in_sel = Input(Vec(banks, Bool()))
val ina = Input(Vec(banks, t.cloneType))
val inb = Input(Vec(banks, t.cloneType))
val out = Output(t.cloneType)
})
val ina = Mux1H(io.in_sel, io.ina)
val inb = Mux1H(io.in_sel, io.inb)
io.out := VecInit((ina zip inb).map { case (rv, wv) =>
VecInit((rv zip wv).map { case (re, we) =>
val m = Module(new AccPipe(latency, t.head.head.cloneType))
m.io.op1 := re
m.io.op2 := we
m.io.sum
})
})
}
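// Editor's sketch (hypothetical wiring, not part of the files in this listing): one
// AccPipeShared instance can serve several accumulator banks, because Mux1H assumes at
// most one in_sel bit is high in any cycle. Each bank drives its slot and reads the shared
// sum back through its adder port:
//
//   val sharedAdder = Module(new AccPipeShared(latency, t, banks))
//   for ((bank, i) <- accBanks.zipWithIndex) {
//     sharedAdder.io.in_sel(i) := bank.io.adder.valid
//     sharedAdder.io.ina(i)    := bank.io.adder.op1
//     sharedAdder.io.inb(i)    := bank.io.adder.op2
//     bank.io.adder.sum        := sharedAdder.io.out
//   }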
class AccumulatorMem[T <: Data, U <: Data](
n: Int, t: Vec[Vec[T]], scale_func: (T, U) => T, scale_t: U,
acc_singleported: Boolean, acc_sub_banks: Int,
use_shared_ext_mem: Boolean,
acc_latency: Int, acc_type: T, is_dummy: Boolean
)
(implicit ev: Arithmetic[T]) extends Module {
// TODO Do writes in this module work with matrices of size 2? If we try to read from an address right after writing
// to it, then we might not get the written data. We might need some kind of cooldown counter after addresses in the
// accumulator have been written to for configurations with such small matrices
// TODO make a new aligned_to variable specifically for AccumulatorMem. We should assume that inputs are at least
// accType.getWidth/8 aligned, because it won't make sense to do matrix additions directly in the DMA otherwise.
import ev._
// TODO unify this with TwoPortSyncMemIO
val io = IO(new AccumulatorMemIO(n, t, scale_t, acc_sub_banks, use_shared_ext_mem))
require (acc_latency >= 2)
val pipelined_writes = Reg(Vec(acc_latency, Valid(new AccumulatorWriteReq(n, t))))
val oldest_pipelined_write = pipelined_writes(acc_latency-1)
pipelined_writes(0).valid := io.write.fire
pipelined_writes(0).bits := io.write.bits
for (i <- 1 until acc_latency) {
pipelined_writes(i) := pipelined_writes(i-1)
}
val rdata_for_adder = Wire(t)
rdata_for_adder := DontCare
val rdata_for_read_resp = Wire(t)
rdata_for_read_resp := DontCare
val adder_sum = io.adder.sum
io.adder.valid := pipelined_writes(0).valid && pipelined_writes(0).bits.acc
io.adder.op1 := rdata_for_adder
io.adder.op2 := pipelined_writes(0).bits.data
val block_read_req = WireInit(false.B)
val block_write_req = WireInit(false.B)
val mask_len = t.getWidth / 8
val mask_elem = UInt((t.getWidth / mask_len).W)
if (!acc_singleported && !is_dummy) {
require(!use_shared_ext_mem)
val mem = TwoPortSyncMem(n, t, mask_len) // TODO We assume byte-alignment here. Use aligned_to instead
mem.io.waddr := oldest_pipelined_write.bits.addr
mem.io.wen := oldest_pipelined_write.valid
mem.io.wdata := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data)
mem.io.mask := oldest_pipelined_write.bits.mask
rdata_for_adder := mem.io.rdata
rdata_for_read_resp := mem.io.rdata
mem.io.raddr := Mux(io.write.fire && io.write.bits.acc, io.write.bits.addr, io.read.req.bits.addr)
mem.io.ren := io.read.req.fire || (io.write.fire && io.write.bits.acc)
} else if (!is_dummy) {
val rmw_req = Wire(Decoupled(UInt()))
rmw_req.valid := io.write.valid && io.write.bits.acc
rmw_req.bits := io.write.bits.addr
rmw_req.ready := true.B
block_write_req := !rmw_req.ready
val only_read_req = Wire(Decoupled(UInt()))
only_read_req.valid := io.read.req.valid
only_read_req.bits := io.read.req.bits.addr
only_read_req.ready := true.B
block_read_req := !only_read_req.ready
for (i <- 0 until acc_sub_banks) {
def isThisBank(addr: UInt) = addr(log2Ceil(acc_sub_banks)-1,0) === i.U
def getBankIdx(addr: UInt) = addr >> log2Ceil(acc_sub_banks)
val (read, write) = if (use_shared_ext_mem) {
def read(addr: UInt, ren: Bool): Data = {
io.ext_mem.get(i).read_en := ren
io.ext_mem.get(i).read_addr := addr
io.ext_mem.get(i).read_data
}
io.ext_mem.get(i).write_en := false.B
io.ext_mem.get(i).write_addr := DontCare
io.ext_mem.get(i).write_data := DontCare
io.ext_mem.get(i).write_mask := DontCare
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = {
io.ext_mem.get(i).write_en := true.B
io.ext_mem.get(i).write_addr := addr
io.ext_mem.get(i).write_data := wdata.asUInt
io.ext_mem.get(i).write_mask := wmask.asUInt
}
(read _, write _)
} else {
val mem = SyncReadMem(n / acc_sub_banks, Vec(mask_len, mask_elem))
def read(addr: UInt, ren: Bool): Data = mem.read(addr, ren)
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = mem.write(addr, wdata, wmask)
(read _, write _)
}
val ren = WireInit(false.B)
val raddr = WireInit(getBankIdx(rmw_req.bits))
val nEntries = 3
      // Writes coming 2 cycles after a read lead to bad bank behavior,
      // so add another buffer here
class W_Q_Entry[T <: Data](mask_len: Int, mask_elem: T) extends Bundle {
val valid = Bool()
val data = Vec(mask_len, mask_elem)
val mask = Vec(mask_len, Bool())
val addr = UInt(log2Ceil(n/acc_sub_banks).W)
}
val w_q = Reg(Vec(nEntries, new W_Q_Entry(mask_len, mask_elem)))
for (e <- w_q) {
when (e.valid) {
assert(!(
io.write.fire && io.write.bits.acc &&
isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr &&
((io.write.bits.mask.asUInt & e.mask.asUInt) =/= 0.U)
), "you cannot accumulate to an AccumulatorMem address until previous writes to that address have completed")
when (io.write.bits.acc && isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr) {
rmw_req.ready := false.B
}
when (isThisBank(io.read.req.bits.addr) && getBankIdx(io.read.req.bits.addr) === e.addr) {
only_read_req.ready := false.B
}
}
}
val w_q_head = RegInit(1.U(nEntries.W))
val w_q_tail = RegInit(1.U(nEntries.W))
val w_q_full = (w_q_tail.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val w_q_empty = !(w_q_head.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val wen = WireInit(false.B)
val wdata = Mux1H(w_q_head.asBools, w_q.map(_.data))
val wmask = Mux1H(w_q_head.asBools, w_q.map(_.mask))
val waddr = Mux1H(w_q_head.asBools, w_q.map(_.addr))
when (wen) {
w_q_head := (w_q_head << 1).asUInt | w_q_head(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_head(i)) {
w_q(i).valid := false.B
}
}
}
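      // Editor's note: w_q_head and w_q_tail are one-hot pointers, rotated left by one with
      // the MSB wrapping back into bit 0. With nEntries = 3 the head cycles
      // 3'b001 -> 3'b010 -> 3'b100 -> 3'b001, so the Mux1H selects above always pick the
      // oldest entry without needing a binary counter or comparison logic.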
val w_q_push = oldest_pipelined_write.valid && isThisBank(oldest_pipelined_write.bits.addr)
when (w_q_push) {
assert(!w_q_full || wen, "we ran out of acc-sub-bank write q entries")
w_q_tail := (w_q_tail << 1).asUInt | w_q_tail(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_tail(i)) {
w_q(i).valid := true.B
w_q(i).data := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data).asTypeOf(Vec(mask_len, mask_elem))
w_q(i).mask := oldest_pipelined_write.bits.mask
w_q(i).addr := getBankIdx(oldest_pipelined_write.bits.addr)
}
}
}
val bank_rdata = read(raddr, ren && !wen).asTypeOf(t)
when (RegNext(ren && rmw_req.valid && isThisBank(rmw_req.bits))) {
rdata_for_adder := bank_rdata
} .elsewhen (RegNext(ren)) {
rdata_for_read_resp := bank_rdata
}
when (wen) {
write(waddr, wdata, wmask)
}
// Three requestors, 1 slot
// Priority is (in descending order):
// 1. incoming reads for RMW
// 2. writes from RMW
// 3. incoming reads
when (rmw_req.fire && isThisBank(rmw_req.bits)) {
ren := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .elsewhen (!w_q_empty) {
wen := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .otherwise {
ren := isThisBank(only_read_req.bits) && only_read_req.fire
raddr := getBankIdx(only_read_req.bits)
}
when (reset.asBool) {
w_q.foreach(_.valid := false.B)
}
}
}
val q = Module(new Queue(new AccumulatorReadResp(t, scale_t), 1, true, true))
q.io.enq.bits.data := rdata_for_read_resp
if (is_dummy) {
rdata_for_read_resp := DontCare
rdata_for_adder := DontCare
}
q.io.enq.bits.scale := RegNext(io.read.req.bits.scale)
q.io.enq.bits.igelu_qb := RegNext(io.read.req.bits.igelu_qb)
q.io.enq.bits.igelu_qc := RegNext(io.read.req.bits.igelu_qc)
q.io.enq.bits.iexp_qln2 := RegNext(io.read.req.bits.iexp_qln2)
q.io.enq.bits.iexp_qln2_inv := RegNext(io.read.req.bits.iexp_qln2_inv)
q.io.enq.bits.act := RegNext(io.read.req.bits.act)
q.io.enq.bits.fromDMA := RegNext(io.read.req.bits.fromDMA)
q.io.enq.bits.acc_bank_id := DontCare
q.io.enq.valid := RegNext(io.read.req.fire)
val p = q.io.deq
io.read.resp.bits.data := p.bits.data
io.read.resp.bits.fromDMA := p.bits.fromDMA
io.read.resp.bits.igelu_qb := p.bits.igelu_qb
io.read.resp.bits.igelu_qc := p.bits.igelu_qc
io.read.resp.bits.iexp_qln2 := p.bits.iexp_qln2
io.read.resp.bits.iexp_qln2_inv := p.bits.iexp_qln2_inv
io.read.resp.bits.act := p.bits.act
io.read.resp.bits.scale := p.bits.scale
io.read.resp.bits.acc_bank_id := DontCare // This is set in Scratchpad
io.read.resp.valid := p.valid
p.ready := io.read.resp.ready
val q_will_be_empty = (q.io.count +& q.io.enq.fire) - q.io.deq.fire === 0.U
io.read.req.ready := q_will_be_empty && (
// Make sure we aren't accumulating, which would take over both ports
!(io.write.valid && io.write.bits.acc) &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.read.req.bits.addr).reduce(_||_) &&
!block_read_req
)
io.write.ready := !block_write_req &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.write.bits.addr && io.write.bits.acc).reduce(_||_)
when (reset.asBool) {
pipelined_writes.foreach(_.valid := false.B)
}
// assert(!(io.read.req.valid && io.write.en && io.write.acc), "reading and accumulating simultaneously is not supported")
assert(!(io.read.req.fire && io.write.fire && io.read.req.bits.addr === io.write.bits.addr), "reading from and writing to same address is not supported")
}
File SyncMem.scala:
package gemmini
import chisel3._
import chisel3.util._
class SinglePortedSyncMemIO[T <: Data](n: Int, t: T) extends Bundle {
val addr = Input(UInt((log2Ceil(n) max 1).W))
val wdata = Input(t)
val rdata = Output(t)
val wen = Input(Bool())
val ren = Input(Bool())
}
class SinglePortSyncMem[T <: Data](n: Int, t: T) extends Module {
val io = IO(new SinglePortedSyncMemIO(n, t))
assert(!(io.ren && io.wen), "undefined behavior in single-ported SRAM")
val mem = SyncReadMem(n, t)
when (io.wen) {
mem.write(io.addr, io.wdata)
io.rdata := DontCare
}.otherwise {
io.rdata := mem.read(io.addr, io.ren)
}
}
class TwoPortSyncMem[T <: Data](n: Int, t: T, mask_len: Int) extends Module {
val io = IO(new Bundle {
val waddr = Input(UInt((log2Ceil(n) max 1).W))
val raddr = Input(UInt((log2Ceil(n) max 1).W))
val wdata = Input(t)
val rdata = Output(t)
val wen = Input(Bool())
val ren = Input(Bool())
val mask = Input(Vec(mask_len, Bool()))
})
assert(!(io.wen && io.ren && io.raddr === io.waddr), "undefined behavior in dual-ported SRAM")
// val mem = SyncReadMem(n, t)
val mask_elem = UInt((t.getWidth / mask_len).W)
val mem = SyncReadMem(n, Vec(mask_len, mask_elem))
io.rdata := mem.read(io.raddr, io.ren).asTypeOf(t)
when (io.wen) {
mem.write(io.waddr, io.wdata.asTypeOf(Vec(mask_len, mask_elem)), io.mask)
}
}
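// Editor's note: viewing t as Vec(mask_len, mask_elem) is what gives byte-granular write
// masks. For the configuration elaborated further below (t = Vec(16, Vec(1, <32-bit elem>)),
// 512 bits in total) this works out to mask_len = 64 lanes of 8 bits each, which matches
// the 64 write-mask inputs visible on the generated AccumulatorMem module.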
class SplitSinglePortSyncMem[T <: Data](n: Int, t: T, splits: Int) extends Module {
val io = IO(new Bundle {
val waddr = Input(UInt((log2Ceil(n) max 1).W))
val raddr = Input(UInt((log2Ceil(n) max 1).W))
val wdata = Input(t)
val rdata = Output(t)
val wen = Input(Bool())
val ren = Input(Bool())
})
val lens = n / splits
val last_len = n - (splits-1)*lens
def is_in_range(addr: UInt, i: Int) = {
if (i == splits-1)
addr >= (i*lens).U
else
addr >= (i*lens).U && addr < ((i+1)*lens).U
}
def split_addr(addr: UInt, i: Int) = {
addr - (i*lens).U
}
val srams = Seq.fill(splits-1)(SinglePortSyncMem(lens, t).io) :+ SinglePortSyncMem(last_len, t).io
val output_split = Reg(UInt((log2Ceil(splits) max 1).W))
io.rdata := DontCare
srams.zipWithIndex.foreach { case (sr, i) =>
sr.addr := Mux(sr.ren, split_addr(io.raddr, i), split_addr(io.waddr, i))
sr.wdata := io.wdata
sr.ren := io.ren && is_in_range(io.raddr, i)
sr.wen := io.wen && is_in_range(io.waddr, i)
when (sr.ren) {
output_split := i.U
}
// This is an awkward Chisel Vec error workaround
when (output_split === i.U) {
io.rdata := sr.rdata
}
}
}
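// Editor's note: output_split is registered in the same cycle a read is issued to sub-SRAM
// i, and SyncReadMem data returns one cycle later, so the comparison against output_split
// selects the correct sr.rdata in the cycle the data is actually valid. Selecting with a
// per-element when-comparison (rather than indexing the `srams` Seq with a hardware value)
// is presumably the "awkward Chisel Vec error" workaround the comment above refers to.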
object SinglePortSyncMem {
def apply[T <: Data](n: Int, t: T): SinglePortSyncMem[T] = Module(new SinglePortSyncMem(n, t))
}
object TwoPortSyncMem {
def apply[T <: Data](n: Int, t: T, mask_len: Int): TwoPortSyncMem[T] = Module(new TwoPortSyncMem(n, t, mask_len))
}
object SplitSinglePortSyncMem {
def apply[T <: Data](n: Int, t: T, splits: Int): SplitSinglePortSyncMem[T] = Module(new SplitSinglePortSyncMem(n, t, splits))
}
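Before the generated Verilog below, a minimal self-contained sketch (an editor's addition, not one of the source files above) of the masked-SyncReadMem technique that TwoPortSyncMem uses and that the elaborated AccumulatorMem relies on: the payload type is stored as Vec(mask_len, mask_elem) byte lanes so a per-byte write mask can be applied, and reads are cast back with asTypeOf. The module name and default parameters (MaskedMemSketch, n = 512, 16 x 32-bit elements) are illustrative, chosen only to match the port widths of the module below.

import chisel3._
import chisel3.util._

class MaskedMemSketch(n: Int = 512, elems: Int = 16, elemWidth: Int = 32) extends Module {
  val t = Vec(elems, UInt(elemWidth.W))                  // payload type: 512 bits by default
  val maskLen = t.getWidth / 8                           // one mask bit per byte => 64
  val maskElem = UInt((t.getWidth / maskLen).W)          // 8-bit write lanes
  val io = IO(new Bundle {
    val raddr = Input(UInt(log2Ceil(n).W))
    val ren   = Input(Bool())
    val rdata = Output(t)
    val waddr = Input(UInt(log2Ceil(n).W))
    val wen   = Input(Bool())
    val wdata = Input(t)
    val mask  = Input(Vec(maskLen, Bool()))
  })
  // Back the memory with byte lanes so the per-lane mask can be applied on writes.
  val mem = SyncReadMem(n, Vec(maskLen, maskElem))
  io.rdata := mem.read(io.raddr, io.ren).asTypeOf(t)     // reassemble the payload type
  when (io.wen) {
    mem.write(io.waddr, io.wdata.asTypeOf(Vec(maskLen, maskElem)), io.mask)
  }
}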
| module AccumulatorMem( // @[AccumulatorMem.scala:92:7]
input clock, // @[AccumulatorMem.scala:92:7]
input reset, // @[AccumulatorMem.scala:92:7]
output io_read_req_ready, // @[AccumulatorMem.scala:109:14]
input io_read_req_valid, // @[AccumulatorMem.scala:109:14]
input [31:0] io_read_req_bits_scale_bits, // @[AccumulatorMem.scala:109:14]
input [8:0] io_read_req_bits_addr, // @[AccumulatorMem.scala:109:14]
input [31:0] io_read_req_bits_igelu_qb, // @[AccumulatorMem.scala:109:14]
input [31:0] io_read_req_bits_igelu_qc, // @[AccumulatorMem.scala:109:14]
input [31:0] io_read_req_bits_iexp_qln2, // @[AccumulatorMem.scala:109:14]
input [31:0] io_read_req_bits_iexp_qln2_inv, // @[AccumulatorMem.scala:109:14]
input [2:0] io_read_req_bits_act, // @[AccumulatorMem.scala:109:14]
input io_read_req_bits_full, // @[AccumulatorMem.scala:109:14]
input io_read_resp_ready, // @[AccumulatorMem.scala:109:14]
output io_read_resp_valid, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_0_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_1_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_2_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_3_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_4_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_5_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_6_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_7_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_8_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_9_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_10_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_11_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_12_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_13_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_14_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_data_15_0, // @[AccumulatorMem.scala:109:14]
output io_read_resp_bits_fromDMA, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_scale_bits, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_igelu_qb, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_igelu_qc, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_iexp_qln2, // @[AccumulatorMem.scala:109:14]
output [31:0] io_read_resp_bits_iexp_qln2_inv, // @[AccumulatorMem.scala:109:14]
output [2:0] io_read_resp_bits_act, // @[AccumulatorMem.scala:109:14]
output io_write_ready, // @[AccumulatorMem.scala:109:14]
input io_write_valid, // @[AccumulatorMem.scala:109:14]
input [8:0] io_write_bits_addr, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_0_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_1_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_2_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_3_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_4_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_5_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_6_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_7_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_8_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_9_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_10_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_11_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_12_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_13_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_14_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_write_bits_data_15_0, // @[AccumulatorMem.scala:109:14]
input io_write_bits_acc, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_0, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_1, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_2, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_3, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_4, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_5, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_6, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_7, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_8, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_9, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_10, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_11, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_12, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_13, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_14, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_15, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_16, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_17, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_18, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_19, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_20, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_21, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_22, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_23, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_24, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_25, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_26, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_27, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_28, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_29, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_30, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_31, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_32, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_33, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_34, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_35, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_36, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_37, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_38, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_39, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_40, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_41, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_42, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_43, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_44, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_45, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_46, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_47, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_48, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_49, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_50, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_51, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_52, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_53, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_54, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_55, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_56, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_57, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_58, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_59, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_60, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_61, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_62, // @[AccumulatorMem.scala:109:14]
input io_write_bits_mask_63, // @[AccumulatorMem.scala:109:14]
output io_adder_valid, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_0_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_1_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_2_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_3_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_4_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_5_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_6_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_7_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_8_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_9_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_10_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_11_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_12_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_13_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_14_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op1_15_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_0_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_1_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_2_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_3_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_4_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_5_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_6_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_7_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_8_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_9_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_10_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_11_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_12_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_13_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_14_0, // @[AccumulatorMem.scala:109:14]
output [31:0] io_adder_op2_15_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_0_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_1_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_2_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_3_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_4_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_5_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_6_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_7_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_8_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_9_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_10_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_11_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_12_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_13_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_14_0, // @[AccumulatorMem.scala:109:14]
input [31:0] io_adder_sum_15_0 // @[AccumulatorMem.scala:109:14]
);
wire _q_io_enq_ready; // @[AccumulatorMem.scala:294:17]
wire _q_io_deq_valid; // @[AccumulatorMem.scala:294:17]
wire _q_io_count; // @[AccumulatorMem.scala:294:17]
wire [31:0] _mem_io_rdata_0_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_1_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_2_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_3_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_4_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_5_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_6_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_7_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_8_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_9_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_10_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_11_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_12_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_13_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_14_0; // @[SyncMem.scala:105:80]
wire [31:0] _mem_io_rdata_15_0; // @[SyncMem.scala:105:80]
wire io_read_req_valid_0 = io_read_req_valid; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_req_bits_scale_bits_0 = io_read_req_bits_scale_bits; // @[AccumulatorMem.scala:92:7]
wire [8:0] io_read_req_bits_addr_0 = io_read_req_bits_addr; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_req_bits_igelu_qb_0 = io_read_req_bits_igelu_qb; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_req_bits_igelu_qc_0 = io_read_req_bits_igelu_qc; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_req_bits_iexp_qln2_0 = io_read_req_bits_iexp_qln2; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_req_bits_iexp_qln2_inv_0 = io_read_req_bits_iexp_qln2_inv; // @[AccumulatorMem.scala:92:7]
wire [2:0] io_read_req_bits_act_0 = io_read_req_bits_act; // @[AccumulatorMem.scala:92:7]
wire io_read_req_bits_full_0 = io_read_req_bits_full; // @[AccumulatorMem.scala:92:7]
wire io_read_resp_ready_0 = io_read_resp_ready; // @[AccumulatorMem.scala:92:7]
wire io_write_valid_0 = io_write_valid; // @[AccumulatorMem.scala:92:7]
wire [8:0] io_write_bits_addr_0 = io_write_bits_addr; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_0_0_0 = io_write_bits_data_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_1_0_0 = io_write_bits_data_1_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_2_0_0 = io_write_bits_data_2_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_3_0_0 = io_write_bits_data_3_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_4_0_0 = io_write_bits_data_4_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_5_0_0 = io_write_bits_data_5_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_6_0_0 = io_write_bits_data_6_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_7_0_0 = io_write_bits_data_7_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_8_0_0 = io_write_bits_data_8_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_9_0_0 = io_write_bits_data_9_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_10_0_0 = io_write_bits_data_10_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_11_0_0 = io_write_bits_data_11_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_12_0_0 = io_write_bits_data_12_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_13_0_0 = io_write_bits_data_13_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_14_0_0 = io_write_bits_data_14_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_write_bits_data_15_0_0 = io_write_bits_data_15_0; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_acc_0 = io_write_bits_acc; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_0_0 = io_write_bits_mask_0; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_1_0 = io_write_bits_mask_1; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_2_0 = io_write_bits_mask_2; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_3_0 = io_write_bits_mask_3; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_4_0 = io_write_bits_mask_4; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_5_0 = io_write_bits_mask_5; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_6_0 = io_write_bits_mask_6; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_7_0 = io_write_bits_mask_7; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_8_0 = io_write_bits_mask_8; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_9_0 = io_write_bits_mask_9; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_10_0 = io_write_bits_mask_10; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_11_0 = io_write_bits_mask_11; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_12_0 = io_write_bits_mask_12; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_13_0 = io_write_bits_mask_13; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_14_0 = io_write_bits_mask_14; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_15_0 = io_write_bits_mask_15; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_16_0 = io_write_bits_mask_16; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_17_0 = io_write_bits_mask_17; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_18_0 = io_write_bits_mask_18; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_19_0 = io_write_bits_mask_19; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_20_0 = io_write_bits_mask_20; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_21_0 = io_write_bits_mask_21; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_22_0 = io_write_bits_mask_22; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_23_0 = io_write_bits_mask_23; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_24_0 = io_write_bits_mask_24; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_25_0 = io_write_bits_mask_25; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_26_0 = io_write_bits_mask_26; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_27_0 = io_write_bits_mask_27; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_28_0 = io_write_bits_mask_28; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_29_0 = io_write_bits_mask_29; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_30_0 = io_write_bits_mask_30; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_31_0 = io_write_bits_mask_31; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_32_0 = io_write_bits_mask_32; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_33_0 = io_write_bits_mask_33; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_34_0 = io_write_bits_mask_34; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_35_0 = io_write_bits_mask_35; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_36_0 = io_write_bits_mask_36; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_37_0 = io_write_bits_mask_37; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_38_0 = io_write_bits_mask_38; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_39_0 = io_write_bits_mask_39; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_40_0 = io_write_bits_mask_40; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_41_0 = io_write_bits_mask_41; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_42_0 = io_write_bits_mask_42; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_43_0 = io_write_bits_mask_43; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_44_0 = io_write_bits_mask_44; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_45_0 = io_write_bits_mask_45; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_46_0 = io_write_bits_mask_46; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_47_0 = io_write_bits_mask_47; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_48_0 = io_write_bits_mask_48; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_49_0 = io_write_bits_mask_49; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_50_0 = io_write_bits_mask_50; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_51_0 = io_write_bits_mask_51; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_52_0 = io_write_bits_mask_52; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_53_0 = io_write_bits_mask_53; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_54_0 = io_write_bits_mask_54; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_55_0 = io_write_bits_mask_55; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_56_0 = io_write_bits_mask_56; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_57_0 = io_write_bits_mask_57; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_58_0 = io_write_bits_mask_58; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_59_0 = io_write_bits_mask_59; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_60_0 = io_write_bits_mask_60; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_61_0 = io_write_bits_mask_61; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_62_0 = io_write_bits_mask_62; // @[AccumulatorMem.scala:92:7]
wire io_write_bits_mask_63_0 = io_write_bits_mask_63; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_0_0_0 = io_adder_sum_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_1_0_0 = io_adder_sum_1_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_2_0_0 = io_adder_sum_2_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_3_0_0 = io_adder_sum_3_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_4_0_0 = io_adder_sum_4_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_5_0_0 = io_adder_sum_5_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_6_0_0 = io_adder_sum_6_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_7_0_0 = io_adder_sum_7_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_8_0_0 = io_adder_sum_8_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_9_0_0 = io_adder_sum_9_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_10_0_0 = io_adder_sum_10_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_11_0_0 = io_adder_sum_11_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_12_0_0 = io_adder_sum_12_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_13_0_0 = io_adder_sum_13_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_14_0_0 = io_adder_sum_14_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_sum_15_0_0 = io_adder_sum_15_0; // @[AccumulatorMem.scala:92:7]
wire io_read_req_bits_fromDMA = 1'h1; // @[AccumulatorMem.scala:92:7]
wire _io_read_req_ready_T_9 = 1'h1; // @[AccumulatorMem.scala:331:7]
wire _io_write_ready_T = 1'h1; // @[AccumulatorMem.scala:334:21]
wire [1:0] io_read_resp_bits_acc_bank_id = 2'h0; // @[AccumulatorMem.scala:92:7]
wire block_read_req = 1'h0; // @[AccumulatorMem.scala:131:32]
wire block_write_req = 1'h0; // @[AccumulatorMem.scala:132:33]
wire _io_read_req_ready_T_11; // @[AccumulatorMem.scala:327:40]
wire _io_write_ready_T_9; // @[AccumulatorMem.scala:334:38]
wire _io_adder_valid_T; // @[AccumulatorMem.scala:127:47]
wire [31:0] rdata_for_adder_0_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_1_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_2_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_3_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_4_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_5_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_6_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_7_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_8_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_9_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_10_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_11_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_12_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_13_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_14_0; // @[AccumulatorMem.scala:121:29]
wire [31:0] rdata_for_adder_15_0; // @[AccumulatorMem.scala:121:29]
wire io_read_req_ready_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_0_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_1_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_2_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_3_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_4_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_5_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_6_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_7_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_8_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_9_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_10_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_11_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_12_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_13_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_14_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_data_15_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_scale_bits_0; // @[AccumulatorMem.scala:92:7]
wire io_read_resp_bits_fromDMA_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_igelu_qb_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_igelu_qc_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_iexp_qln2_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_read_resp_bits_iexp_qln2_inv_0; // @[AccumulatorMem.scala:92:7]
wire [2:0] io_read_resp_bits_act_0; // @[AccumulatorMem.scala:92:7]
wire io_read_resp_valid_0; // @[AccumulatorMem.scala:92:7]
wire io_write_ready_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_0_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_1_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_2_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_3_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_4_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_5_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_6_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_7_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_8_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_9_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_10_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_11_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_12_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_13_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_14_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op1_15_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_0_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_1_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_2_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_3_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_4_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_5_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_6_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_7_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_8_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_9_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_10_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_11_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_12_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_13_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_14_0_0; // @[AccumulatorMem.scala:92:7]
wire [31:0] io_adder_op2_15_0_0; // @[AccumulatorMem.scala:92:7]
wire io_adder_valid_0; // @[AccumulatorMem.scala:92:7]
reg pipelined_writes_0_valid; // @[AccumulatorMem.scala:113:29]
reg [8:0] pipelined_writes_0_bits_addr; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_0_bits_data_0_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_0_0_0 = pipelined_writes_0_bits_data_0_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_1_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_1_0_0 = pipelined_writes_0_bits_data_1_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_2_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_2_0_0 = pipelined_writes_0_bits_data_2_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_3_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_3_0_0 = pipelined_writes_0_bits_data_3_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_4_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_4_0_0 = pipelined_writes_0_bits_data_4_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_5_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_5_0_0 = pipelined_writes_0_bits_data_5_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_6_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_6_0_0 = pipelined_writes_0_bits_data_6_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_7_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_7_0_0 = pipelined_writes_0_bits_data_7_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_8_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_8_0_0 = pipelined_writes_0_bits_data_8_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_9_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_9_0_0 = pipelined_writes_0_bits_data_9_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_10_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_10_0_0 = pipelined_writes_0_bits_data_10_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_11_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_11_0_0 = pipelined_writes_0_bits_data_11_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_12_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_12_0_0 = pipelined_writes_0_bits_data_12_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_13_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_13_0_0 = pipelined_writes_0_bits_data_13_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_14_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_14_0_0 = pipelined_writes_0_bits_data_14_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg [31:0] pipelined_writes_0_bits_data_15_0; // @[AccumulatorMem.scala:113:29]
assign io_adder_op2_15_0_0 = pipelined_writes_0_bits_data_15_0; // @[AccumulatorMem.scala:92:7, :113:29]
reg pipelined_writes_0_bits_acc; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_0; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_1; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_2; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_3; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_4; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_5; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_6; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_7; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_8; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_9; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_10; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_11; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_12; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_13; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_14; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_15; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_16; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_17; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_18; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_19; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_20; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_21; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_22; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_23; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_24; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_25; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_26; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_27; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_28; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_29; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_30; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_31; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_32; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_33; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_34; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_35; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_36; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_37; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_38; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_39; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_40; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_41; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_42; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_43; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_44; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_45; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_46; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_47; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_48; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_49; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_50; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_51; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_52; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_53; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_54; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_55; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_56; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_57; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_58; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_59; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_60; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_61; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_62; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_0_bits_mask_63; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_valid; // @[AccumulatorMem.scala:113:29]
reg [8:0] pipelined_writes_1_bits_addr; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_0_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_1_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_2_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_3_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_4_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_5_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_6_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_7_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_8_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_9_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_10_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_11_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_12_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_13_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_14_0; // @[AccumulatorMem.scala:113:29]
reg [31:0] pipelined_writes_1_bits_data_15_0; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_acc; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_0; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_1; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_2; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_3; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_4; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_5; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_6; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_7; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_8; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_9; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_10; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_11; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_12; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_13; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_14; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_15; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_16; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_17; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_18; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_19; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_20; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_21; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_22; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_23; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_24; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_25; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_26; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_27; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_28; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_29; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_30; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_31; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_32; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_33; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_34; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_35; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_36; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_37; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_38; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_39; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_40; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_41; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_42; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_43; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_44; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_45; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_46; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_47; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_48; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_49; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_50; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_51; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_52; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_53; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_54; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_55; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_56; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_57; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_58; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_59; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_60; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_61; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_62; // @[AccumulatorMem.scala:113:29]
reg pipelined_writes_1_bits_mask_63; // @[AccumulatorMem.scala:113:29]
wire _T_3 = io_write_ready_0 & io_write_valid_0; // @[Decoupled.scala:51:35]
wire _pipelined_writes_0_valid_T; // @[Decoupled.scala:51:35]
assign _pipelined_writes_0_valid_T = _T_3; // @[Decoupled.scala:51:35]
wire _mem_io_raddr_T; // @[Decoupled.scala:51:35]
assign _mem_io_raddr_T = _T_3; // @[Decoupled.scala:51:35]
wire _mem_io_ren_T_1; // @[Decoupled.scala:51:35]
assign _mem_io_ren_T_1 = _T_3; // @[Decoupled.scala:51:35]
assign io_adder_op1_0_0_0 = rdata_for_adder_0_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_1_0_0 = rdata_for_adder_1_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_2_0_0 = rdata_for_adder_2_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_3_0_0 = rdata_for_adder_3_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_4_0_0 = rdata_for_adder_4_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_5_0_0 = rdata_for_adder_5_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_6_0_0 = rdata_for_adder_6_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_7_0_0 = rdata_for_adder_7_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_8_0_0 = rdata_for_adder_8_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_9_0_0 = rdata_for_adder_9_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_10_0_0 = rdata_for_adder_10_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_11_0_0 = rdata_for_adder_11_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_12_0_0 = rdata_for_adder_12_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_13_0_0 = rdata_for_adder_13_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_14_0_0 = rdata_for_adder_14_0; // @[AccumulatorMem.scala:92:7, :121:29]
assign io_adder_op1_15_0_0 = rdata_for_adder_15_0; // @[AccumulatorMem.scala:92:7, :121:29]
wire [31:0] rdata_for_read_resp_0_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_1_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_2_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_3_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_4_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_5_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_6_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_7_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_8_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_9_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_10_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_11_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_12_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_13_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_14_0; // @[AccumulatorMem.scala:123:33]
wire [31:0] rdata_for_read_resp_15_0; // @[AccumulatorMem.scala:123:33]
assign _io_adder_valid_T = pipelined_writes_0_valid & pipelined_writes_0_bits_acc; // @[AccumulatorMem.scala:113:29, :127:47]
assign io_adder_valid_0 = _io_adder_valid_T; // @[AccumulatorMem.scala:92:7, :127:47]
wire _mem_io_raddr_T_1 = _mem_io_raddr_T & io_write_bits_acc_0; // @[Decoupled.scala:51:35]
wire [8:0] _mem_io_raddr_T_2 = _mem_io_raddr_T_1 ? io_write_bits_addr_0 : io_read_req_bits_addr_0; // @[AccumulatorMem.scala:92:7, :146:{24,39}]
wire _T_2 = io_read_req_ready_0 & io_read_req_valid_0; // @[Decoupled.scala:51:35]
wire _mem_io_ren_T; // @[Decoupled.scala:51:35]
assign _mem_io_ren_T = _T_2; // @[Decoupled.scala:51:35]
wire _q_io_enq_valid_T; // @[Decoupled.scala:51:35]
assign _q_io_enq_valid_T = _T_2; // @[Decoupled.scala:51:35]
wire _mem_io_ren_T_2 = _mem_io_ren_T_1 & io_write_bits_acc_0; // @[Decoupled.scala:51:35]
wire _mem_io_ren_T_3 = _mem_io_ren_T | _mem_io_ren_T_2; // @[Decoupled.scala:51:35]
reg [31:0] q_io_enq_bits_scale_REG_bits; // @[AccumulatorMem.scala:302:33]
reg [31:0] q_io_enq_bits_igelu_qb_REG; // @[AccumulatorMem.scala:303:36]
reg [31:0] q_io_enq_bits_igelu_qc_REG; // @[AccumulatorMem.scala:304:36]
reg [31:0] q_io_enq_bits_iexp_qln2_REG; // @[AccumulatorMem.scala:305:37]
reg [31:0] q_io_enq_bits_iexp_qln2_inv_REG; // @[AccumulatorMem.scala:306:41]
reg [2:0] q_io_enq_bits_act_REG; // @[AccumulatorMem.scala:307:31]
reg q_io_enq_valid_REG; // @[AccumulatorMem.scala:310:28]
wire _q_will_be_empty_T = _q_io_enq_ready & q_io_enq_valid_REG; // @[Decoupled.scala:51:35]
wire [1:0] _q_will_be_empty_T_1 = {1'h0, _q_io_count} + {1'h0, _q_will_be_empty_T}; // @[Decoupled.scala:51:35]
wire _q_will_be_empty_T_2 = io_read_resp_ready_0 & _q_io_deq_valid; // @[Decoupled.scala:51:35]
wire [2:0] _q_will_be_empty_T_3 = {1'h0, _q_will_be_empty_T_1} - {2'h0, _q_will_be_empty_T_2}; // @[Decoupled.scala:51:35]
wire [1:0] _q_will_be_empty_T_4 = _q_will_be_empty_T_3[1:0]; // @[AccumulatorMem.scala:326:55]
wire q_will_be_empty = _q_will_be_empty_T_4 == 2'h0; // @[AccumulatorMem.scala:326:{55,71}]
wire _io_read_req_ready_T = io_write_valid_0 & io_write_bits_acc_0; // @[AccumulatorMem.scala:92:7, :329:24]
wire _io_read_req_ready_T_1 = ~_io_read_req_ready_T; // @[AccumulatorMem.scala:329:{7,24}]
wire _io_read_req_ready_T_2 = pipelined_writes_0_bits_addr == io_read_req_bits_addr_0; // @[AccumulatorMem.scala:92:7, :113:29, :330:57]
wire _io_read_req_ready_T_3 = pipelined_writes_0_valid & _io_read_req_ready_T_2; // @[AccumulatorMem.scala:113:29, :330:{42,57}]
wire _io_read_req_ready_T_4 = pipelined_writes_1_bits_addr == io_read_req_bits_addr_0; // @[AccumulatorMem.scala:92:7, :113:29, :330:57]
wire _io_read_req_ready_T_5 = pipelined_writes_1_valid & _io_read_req_ready_T_4; // @[AccumulatorMem.scala:113:29, :330:{42,57}]
wire _io_read_req_ready_T_6 = _io_read_req_ready_T_3 | _io_read_req_ready_T_5; // @[AccumulatorMem.scala:330:{42,92}]
wire _io_read_req_ready_T_7 = ~_io_read_req_ready_T_6; // @[AccumulatorMem.scala:330:{7,92}]
wire _io_read_req_ready_T_8 = _io_read_req_ready_T_1 & _io_read_req_ready_T_7; // @[AccumulatorMem.scala:329:{7,46}, :330:7]
wire _io_read_req_ready_T_10 = _io_read_req_ready_T_8; // @[AccumulatorMem.scala:329:46, :330:98]
assign _io_read_req_ready_T_11 = q_will_be_empty & _io_read_req_ready_T_10; // @[AccumulatorMem.scala:326:71, :327:40, :330:98]
assign io_read_req_ready_0 = _io_read_req_ready_T_11; // @[AccumulatorMem.scala:92:7, :327:40]
wire _io_write_ready_T_1 = pipelined_writes_0_bits_addr == io_write_bits_addr_0; // @[AccumulatorMem.scala:92:7, :113:29, :335:55]
wire _io_write_ready_T_2 = pipelined_writes_0_valid & _io_write_ready_T_1; // @[AccumulatorMem.scala:113:29, :335:{40,55}]
wire _io_write_ready_T_3 = _io_write_ready_T_2 & io_write_bits_acc_0; // @[AccumulatorMem.scala:92:7, :335:{40,78}]
wire _io_write_ready_T_4 = pipelined_writes_1_bits_addr == io_write_bits_addr_0; // @[AccumulatorMem.scala:92:7, :113:29, :335:55]
wire _io_write_ready_T_5 = pipelined_writes_1_valid & _io_write_ready_T_4; // @[AccumulatorMem.scala:113:29, :335:{40,55}]
wire _io_write_ready_T_6 = _io_write_ready_T_5 & io_write_bits_acc_0; // @[AccumulatorMem.scala:92:7, :335:{40,78}]
wire _io_write_ready_T_7 = _io_write_ready_T_3 | _io_write_ready_T_6; // @[AccumulatorMem.scala:335:{78,108}]
wire _io_write_ready_T_8 = ~_io_write_ready_T_7; // @[AccumulatorMem.scala:335:{5,108}]
assign _io_write_ready_T_9 = _io_write_ready_T_8; // @[AccumulatorMem.scala:334:38, :335:5]
assign io_write_ready_0 = _io_write_ready_T_9; // @[AccumulatorMem.scala:92:7, :334:38] |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
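  // Note on the handshake above: a head flit is only accepted into route_buffer
  // when the route-computer request can fire in the same cycle
  // (io.router_req.ready), unless the packet already egresses at this node
  // (at_dest); body/tail flits need no route and bypass that condition.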
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
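  // Switch allocation is only requested while at least one output VC selected
  // by VC allocation still has a credit available (the `c` term above), so a
  // granted flit can always be forwarded without stalling on credits.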
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_49( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_10, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_11, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_12, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_13, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_14, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_15, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_16, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_17, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_18, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_19, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_20, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_21, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_8, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_9, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_10, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_11, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_12, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_13, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_14, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_15, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_16, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_17, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_18, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_19, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_20, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_21, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_10, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_11, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_14, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_15, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_18, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_19, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_20, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_21, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_10, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_11, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_12, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_13, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_14, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_15, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_16, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_17, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_18, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_19, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_20, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_21, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [5:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [5:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_8; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_9; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_10; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_11; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_12; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_13; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_14; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_15; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_16; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_17; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_18; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_19; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_20; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_21; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30]
wire [3:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30]
wire [5:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30]
wire [5:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [5:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [5:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 6'h23; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 6'h2C; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 6'h29; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 6'h26; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_4 = io_in_bits_egress_id == 6'h2F; // @[IngressUnit.scala:30:72]
wire _io_router_req_valid_T_1 = io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head; // @[IngressUnit.scala:26:28, :58:{38,67}]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
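    // readAndHold issues a synchronous read when `enable` is high and then keeps
    // returning that read's data on later cycles (holdUnless with the
    // one-cycle-delayed enable) until the next enabled read.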
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
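    // Rough worked example of the two signed-shift helpers (operands made up):
    // with n = 3.S they behave like x << 3 and x >> 3 respectively, while with
    // n = (-2).S they swap direction, i.e. x << (-2).S acts as x >> 2 and
    // x >> (-2).S acts as x << 2. The require(w <= 30) bound keeps the
    // intermediate `1 << w` shift amounts within Scala Int range.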
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
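    // Quick illustration (made-up operands): 5.U.addWrap(4.U, 7) widens to 9,
    // sees 9 >= 7 and yields 2.U, i.e. (5 + 4) % 7; with a power-of-two modulus
    // it just truncates, so 5.U.addWrap(6.U, 8) is 3.U. Likewise
    // 2.U.subWrap(5.U, 7) goes negative in the wide subtraction, adds 7 back,
    // and yields 4.U.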
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
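  // The OH1 helpers use a thermometer ("one-hot minus one") encoding, e.g.
  // UIntToOH1(3.U, 8) is b0000_0111, OH1ToOH of that value is b0000_1000, and
  // OH1ToUInt recovers 3.U; this is handy when expanding a transfer size into
  // a byte-lane mask.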
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
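  // Example with an 8-bit value: leftOR("b0001_0000".U) = b1111_0000 (each set
  // bit is smeared toward the MSB), while rightOR of the same value gives
  // b0001_1111 (smeared toward the LSB). Both need only log2(width) OR stages.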
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File FPU.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
case class FPUParams(
minFLen: Int = 32,
fLen: Int = 64,
divSqrt: Boolean = true,
sfmaLatency: Int = 3,
dfmaLatency: Int = 4,
fpmuLatency: Int = 2,
ifpuLatency: Int = 2
)
object FPConstants
{
val RM_SZ = 3
val FLAGS_SZ = 5
}
trait HasFPUCtrlSigs {
val ldst = Bool()
val wen = Bool()
val ren1 = Bool()
val ren2 = Bool()
val ren3 = Bool()
val swap12 = Bool()
val swap23 = Bool()
val typeTagIn = UInt(2.W)
val typeTagOut = UInt(2.W)
val fromint = Bool()
val toint = Bool()
val fastpipe = Bool()
val fma = Bool()
val div = Bool()
val sqrt = Bool()
val wflags = Bool()
val vec = Bool()
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val time = Input(UInt(xLen.W))
val inst = Input(Bits(32.W))
val fromint_data = Input(Bits(xLen.W))
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))
val v_sew = Input(UInt(3.W))
val store_data = Output(Bits(fLen.W))
val toint_data = Output(Bits(xLen.W))
val ll_resp_val = Input(Bool())
val ll_resp_type = Input(Bits(3.W))
val ll_resp_tag = Input(UInt(5.W))
val ll_resp_data = Input(Bits(fLen.W))
val valid = Input(Bool())
val fcsr_rdy = Output(Bool())
val nack_mem = Output(Bool())
val illegal_rm = Output(Bool())
val killx = Input(Bool())
val killm = Input(Bool())
val dec = Output(new FPUCtrlSigs())
val sboard_set = Output(Bool())
val sboard_clr = Output(Bool())
val sboard_clra = Output(UInt(5.W))
val keep_clock_enabled = Input(Bool())
}
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
  val cp_req = Flipped(Decoupled(new FPInput())) // coprocessor requests ignore the kill signals
val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val typ = Bits(2.W)
val in1 = Bits(xLen.W)
}
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val fmaCmd = Bits(2.W)
val typ = Bits(2.W)
val fmt = Bits(2.W)
val in1 = Bits((fLen+1).W)
val in2 = Bits((fLen+1).W)
val in3 = Bits((fLen+1).W)
}
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig
def recodedWidth = ieeeWidth + 1
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3)
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
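  // The ten bits above follow the RISC-V FCLASS encoding, MSB to LSB:
  // qNaN, sNaN, +inf, +normal, +subnormal, +zero, -zero, -subnormal, -normal, -inf.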
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig
val expOut = {
val expCode = expIn(exp, exp - 2)
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
val H = new FType(5, 11)
val S = new FType(8, 24)
val D = new FType(11, 53)
val all = List(H, S, D)
}
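// For reference: ieeeWidth = exp + sig and recodedWidth = ieeeWidth + 1,
// so H is 16/17 bits, S is 32/33 bits, and D is 64/65 bits (IEEE/recoded).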
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
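  // Informally: box() embeds the narrower recoded value y inside the NaN payload space of
  // the wider recoded value x, so the unswizzling in unbox()/consistent() can recover it.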
// implement NaN unboxing for FU inputs
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
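// NaN-boxing sketch (informal): in the IEEE domain, recode() stores a narrower value with
// all of its upper bits set (the 'boxes' masks above), and unbox() returns a canonical qNaN
// for the requested type whenever an operand is not properly NaN-boxed for that type.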
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
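      // Per the RISC-V spec, float-to-int conversions raise only the invalid (NV) and
      // inexact (NX) flags; out-of-range results are folded into NV above.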
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in)
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag)
val intValue = {
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
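  // For the sign-injection ops, the rm field (funct3) selects the sign operation above:
  // rm(1) set -> FSGNJX (XOR of signs), rm(0) set -> FSGNJN (negated sign of in2),
  // otherwise FSGNJ (copy sign of in2 onto in1).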
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
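    // Pipeline registers (sketch): latency 0 is fully combinational, latency 1 inserts one
    // register between the multiplier and the post-multiply stage, and latency 2 adds a
    // second register in front of the rounder.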
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
}
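  // Note: two-operand ops reuse the FMA datapath. FADD/FSUB force in2 to 1.0 (the addend
  // was already steered into in3 via swap23), and FMUL forces in3 to a zero whose sign is
  // sign(in1) ^ sign(in2), so exact-zero results keep the correct sign.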
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
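    // Writeback scheduling (informal): wen is a shift register of pending writebacks; bit 0
    // firing writes the register file this cycle, and a pipe of latency L schedules itself
    // by setting bit (L - 2) of wen while its op is in the MEM stage (memLatencyMask).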
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
}
File rawFloatFromFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object rawFloatFromFN {
def apply(expWidth: Int, sigWidth: Int, in: Bits) = {
val sign = in(expWidth + sigWidth - 1)
val expIn = in(expWidth + sigWidth - 2, sigWidth - 1)
val fractIn = in(sigWidth - 2, 0)
val isZeroExpIn = (expIn === 0.U)
val isZeroFractIn = (fractIn === 0.U)
val normDist = countLeadingZeros(fractIn)
val subnormFract = (fractIn << normDist) (sigWidth - 3, 0) << 1
val adjustedExp =
Mux(isZeroExpIn,
normDist ^ ((BigInt(1) << (expWidth + 1)) - 1).U,
expIn
) + ((BigInt(1) << (expWidth - 1)).U
| Mux(isZeroExpIn, 2.U, 1.U))
val isZero = isZeroExpIn && isZeroFractIn
val isSpecial = adjustedExp(expWidth, expWidth - 1) === 3.U
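        // Rough intuition: for a nonzero biased exponent, adjustedExp = expIn + 2^(expWidth-1) + 1;
        // its two MSBs equal 2'b11 only for Inf/NaN inputs, which is what isSpecial checks,
        // while subnormal inputs are renormalized using normDist.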
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && !isZeroFractIn
out.isInf := isSpecial && isZeroFractIn
out.isZero := isZero
out.sign := sign
out.sExp := adjustedExp(expWidth, 0).zext
out.sig :=
0.U(1.W) ## !isZero ## Mux(isZeroExpIn, subnormFract, fractIn)
out
}
}
| module IntToFP_7( // @[FPU.scala:528:7]
input clock, // @[FPU.scala:528:7]
input reset, // @[FPU.scala:528:7]
input io_in_valid, // @[FPU.scala:529:14]
input io_in_bits_ldst, // @[FPU.scala:529:14]
input io_in_bits_wen, // @[FPU.scala:529:14]
input io_in_bits_ren1, // @[FPU.scala:529:14]
input io_in_bits_ren2, // @[FPU.scala:529:14]
input io_in_bits_ren3, // @[FPU.scala:529:14]
input io_in_bits_swap12, // @[FPU.scala:529:14]
input io_in_bits_swap23, // @[FPU.scala:529:14]
input [1:0] io_in_bits_typeTagIn, // @[FPU.scala:529:14]
input [1:0] io_in_bits_typeTagOut, // @[FPU.scala:529:14]
input io_in_bits_fromint, // @[FPU.scala:529:14]
input io_in_bits_toint, // @[FPU.scala:529:14]
input io_in_bits_fastpipe, // @[FPU.scala:529:14]
input io_in_bits_fma, // @[FPU.scala:529:14]
input io_in_bits_div, // @[FPU.scala:529:14]
input io_in_bits_sqrt, // @[FPU.scala:529:14]
input io_in_bits_wflags, // @[FPU.scala:529:14]
input io_in_bits_vec, // @[FPU.scala:529:14]
input [2:0] io_in_bits_rm, // @[FPU.scala:529:14]
input [1:0] io_in_bits_typ, // @[FPU.scala:529:14]
input [63:0] io_in_bits_in1, // @[FPU.scala:529:14]
output [64:0] io_out_bits_data, // @[FPU.scala:529:14]
output [4:0] io_out_bits_exc // @[FPU.scala:529:14]
);
wire mux_data_rawIn_2_isNaN; // @[rawFloatFromFN.scala:63:19]
wire mux_data_rawIn_1_isNaN; // @[rawFloatFromFN.scala:63:19]
wire mux_data_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19]
wire [64:0] _i2fResults_i2f_2_io_out; // @[FPU.scala:556:23]
wire [4:0] _i2fResults_i2f_2_io_exceptionFlags; // @[FPU.scala:556:23]
wire [32:0] _i2fResults_i2f_1_io_out; // @[FPU.scala:556:23]
wire [4:0] _i2fResults_i2f_1_io_exceptionFlags; // @[FPU.scala:556:23]
wire [16:0] _i2fResults_i2f_io_out; // @[FPU.scala:556:23]
wire [4:0] _i2fResults_i2f_io_exceptionFlags; // @[FPU.scala:556:23]
wire io_in_valid_0 = io_in_valid; // @[FPU.scala:528:7]
wire io_in_bits_ldst_0 = io_in_bits_ldst; // @[FPU.scala:528:7]
wire io_in_bits_wen_0 = io_in_bits_wen; // @[FPU.scala:528:7]
wire io_in_bits_ren1_0 = io_in_bits_ren1; // @[FPU.scala:528:7]
wire io_in_bits_ren2_0 = io_in_bits_ren2; // @[FPU.scala:528:7]
wire io_in_bits_ren3_0 = io_in_bits_ren3; // @[FPU.scala:528:7]
wire io_in_bits_swap12_0 = io_in_bits_swap12; // @[FPU.scala:528:7]
wire io_in_bits_swap23_0 = io_in_bits_swap23; // @[FPU.scala:528:7]
wire [1:0] io_in_bits_typeTagIn_0 = io_in_bits_typeTagIn; // @[FPU.scala:528:7]
wire [1:0] io_in_bits_typeTagOut_0 = io_in_bits_typeTagOut; // @[FPU.scala:528:7]
wire io_in_bits_fromint_0 = io_in_bits_fromint; // @[FPU.scala:528:7]
wire io_in_bits_toint_0 = io_in_bits_toint; // @[FPU.scala:528:7]
wire io_in_bits_fastpipe_0 = io_in_bits_fastpipe; // @[FPU.scala:528:7]
wire io_in_bits_fma_0 = io_in_bits_fma; // @[FPU.scala:528:7]
wire io_in_bits_div_0 = io_in_bits_div; // @[FPU.scala:528:7]
wire io_in_bits_sqrt_0 = io_in_bits_sqrt; // @[FPU.scala:528:7]
wire io_in_bits_wflags_0 = io_in_bits_wflags; // @[FPU.scala:528:7]
wire io_in_bits_vec_0 = io_in_bits_vec; // @[FPU.scala:528:7]
wire [2:0] io_in_bits_rm_0 = io_in_bits_rm; // @[FPU.scala:528:7]
wire [1:0] io_in_bits_typ_0 = io_in_bits_typ; // @[FPU.scala:528:7]
wire [63:0] io_in_bits_in1_0 = io_in_bits_in1; // @[FPU.scala:528:7]
wire [32:0] _i2fResults_maskedNaN_T = 33'h1EF7FFFFF; // @[FPU.scala:413:27]
wire [64:0] _i2fResults_maskedNaN_T_1 = 65'h1EFEFFFFFFFFFFFFF; // @[FPU.scala:413:27]
wire io_out_pipe_out_valid; // @[Valid.scala:135:21]
wire [64:0] io_out_pipe_out_bits_data; // @[Valid.scala:135:21]
wire [4:0] io_out_pipe_out_bits_exc; // @[Valid.scala:135:21]
wire [64:0] io_out_bits_data_0; // @[FPU.scala:528:7]
wire [4:0] io_out_bits_exc_0; // @[FPU.scala:528:7]
wire io_out_valid; // @[FPU.scala:528:7]
reg in_pipe_v; // @[Valid.scala:141:24]
wire in_valid = in_pipe_v; // @[Valid.scala:135:21, :141:24]
reg in_pipe_b_ldst; // @[Valid.scala:142:26]
wire in_bits_ldst = in_pipe_b_ldst; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_wen; // @[Valid.scala:142:26]
wire in_bits_wen = in_pipe_b_wen; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_ren1; // @[Valid.scala:142:26]
wire in_bits_ren1 = in_pipe_b_ren1; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_ren2; // @[Valid.scala:142:26]
wire in_bits_ren2 = in_pipe_b_ren2; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_ren3; // @[Valid.scala:142:26]
wire in_bits_ren3 = in_pipe_b_ren3; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_swap12; // @[Valid.scala:142:26]
wire in_bits_swap12 = in_pipe_b_swap12; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_swap23; // @[Valid.scala:142:26]
wire in_bits_swap23 = in_pipe_b_swap23; // @[Valid.scala:135:21, :142:26]
reg [1:0] in_pipe_b_typeTagIn; // @[Valid.scala:142:26]
wire [1:0] in_bits_typeTagIn = in_pipe_b_typeTagIn; // @[Valid.scala:135:21, :142:26]
reg [1:0] in_pipe_b_typeTagOut; // @[Valid.scala:142:26]
wire [1:0] in_bits_typeTagOut = in_pipe_b_typeTagOut; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_fromint; // @[Valid.scala:142:26]
wire in_bits_fromint = in_pipe_b_fromint; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_toint; // @[Valid.scala:142:26]
wire in_bits_toint = in_pipe_b_toint; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_fastpipe; // @[Valid.scala:142:26]
wire in_bits_fastpipe = in_pipe_b_fastpipe; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_fma; // @[Valid.scala:142:26]
wire in_bits_fma = in_pipe_b_fma; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_div; // @[Valid.scala:142:26]
wire in_bits_div = in_pipe_b_div; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_sqrt; // @[Valid.scala:142:26]
wire in_bits_sqrt = in_pipe_b_sqrt; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_wflags; // @[Valid.scala:142:26]
wire in_bits_wflags = in_pipe_b_wflags; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_vec; // @[Valid.scala:142:26]
wire in_bits_vec = in_pipe_b_vec; // @[Valid.scala:135:21, :142:26]
reg [2:0] in_pipe_b_rm; // @[Valid.scala:142:26]
wire [2:0] in_bits_rm = in_pipe_b_rm; // @[Valid.scala:135:21, :142:26]
reg [1:0] in_pipe_b_typ; // @[Valid.scala:142:26]
wire [1:0] in_bits_typ = in_pipe_b_typ; // @[Valid.scala:135:21, :142:26]
reg [63:0] in_pipe_b_in1; // @[Valid.scala:142:26]
wire [63:0] in_bits_in1 = in_pipe_b_in1; // @[Valid.scala:135:21, :142:26]
wire [63:0] _intValue_res_T = in_bits_in1; // @[Valid.scala:135:21]
wire [64:0] mux_data; // @[FPU.scala:537:17]
wire [4:0] mux_exc; // @[FPU.scala:537:17]
wire _GEN = in_bits_typeTagIn == 2'h1; // @[Valid.scala:135:21]
wire _mux_data_T; // @[package.scala:39:86]
assign _mux_data_T = _GEN; // @[package.scala:39:86]
wire _mux_data_T_40; // @[package.scala:39:86]
assign _mux_data_T_40 = _GEN; // @[package.scala:39:86]
wire _mux_exc_T; // @[package.scala:39:86]
assign _mux_exc_T = _GEN; // @[package.scala:39:86]
wire [63:0] _mux_data_T_1 = _mux_data_T ? 64'hFFFFFFFF00000000 : 64'hFFFFFFFFFFFF0000; // @[package.scala:39:{76,86}]
wire _GEN_0 = in_bits_typeTagIn == 2'h2; // @[Valid.scala:135:21]
wire _mux_data_T_2; // @[package.scala:39:86]
assign _mux_data_T_2 = _GEN_0; // @[package.scala:39:86]
wire _mux_data_T_42; // @[package.scala:39:86]
assign _mux_data_T_42 = _GEN_0; // @[package.scala:39:86]
wire _mux_exc_T_2; // @[package.scala:39:86]
assign _mux_exc_T_2 = _GEN_0; // @[package.scala:39:86]
wire [63:0] _mux_data_T_3 = _mux_data_T_2 ? 64'h0 : _mux_data_T_1; // @[package.scala:39:{76,86}]
wire _mux_data_T_4 = &in_bits_typeTagIn; // @[Valid.scala:135:21]
wire [63:0] _mux_data_T_5 = _mux_data_T_4 ? 64'h0 : _mux_data_T_3; // @[package.scala:39:{76,86}]
wire [63:0] _mux_data_T_6 = _mux_data_T_5 | in_bits_in1; // @[Valid.scala:135:21]
wire mux_data_rawIn_sign = _mux_data_T_6[63]; // @[FPU.scala:431:23]
wire mux_data_rawIn_sign_0 = mux_data_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [10:0] mux_data_rawIn_expIn = _mux_data_T_6[62:52]; // @[FPU.scala:431:23]
wire [51:0] mux_data_rawIn_fractIn = _mux_data_T_6[51:0]; // @[FPU.scala:431:23]
wire mux_data_rawIn_isZeroExpIn = mux_data_rawIn_expIn == 11'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire mux_data_rawIn_isZeroFractIn = mux_data_rawIn_fractIn == 52'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _mux_data_rawIn_normDist_T = mux_data_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_1 = mux_data_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_2 = mux_data_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_3 = mux_data_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_4 = mux_data_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_5 = mux_data_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_6 = mux_data_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_7 = mux_data_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_8 = mux_data_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_9 = mux_data_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_10 = mux_data_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_11 = mux_data_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_12 = mux_data_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_13 = mux_data_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_14 = mux_data_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_15 = mux_data_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_16 = mux_data_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_17 = mux_data_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_18 = mux_data_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_19 = mux_data_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_20 = mux_data_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_21 = mux_data_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_22 = mux_data_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_23 = mux_data_rawIn_fractIn[23]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_24 = mux_data_rawIn_fractIn[24]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_25 = mux_data_rawIn_fractIn[25]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_26 = mux_data_rawIn_fractIn[26]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_27 = mux_data_rawIn_fractIn[27]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_28 = mux_data_rawIn_fractIn[28]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_29 = mux_data_rawIn_fractIn[29]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_30 = mux_data_rawIn_fractIn[30]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_31 = mux_data_rawIn_fractIn[31]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_32 = mux_data_rawIn_fractIn[32]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_33 = mux_data_rawIn_fractIn[33]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_34 = mux_data_rawIn_fractIn[34]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_35 = mux_data_rawIn_fractIn[35]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_36 = mux_data_rawIn_fractIn[36]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_37 = mux_data_rawIn_fractIn[37]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_38 = mux_data_rawIn_fractIn[38]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_39 = mux_data_rawIn_fractIn[39]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_40 = mux_data_rawIn_fractIn[40]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_41 = mux_data_rawIn_fractIn[41]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_42 = mux_data_rawIn_fractIn[42]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_43 = mux_data_rawIn_fractIn[43]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_44 = mux_data_rawIn_fractIn[44]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_45 = mux_data_rawIn_fractIn[45]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_46 = mux_data_rawIn_fractIn[46]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_47 = mux_data_rawIn_fractIn[47]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_48 = mux_data_rawIn_fractIn[48]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_49 = mux_data_rawIn_fractIn[49]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_50 = mux_data_rawIn_fractIn[50]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_51 = mux_data_rawIn_fractIn[51]; // @[rawFloatFromFN.scala:46:21]
wire [5:0] _mux_data_rawIn_normDist_T_52 = {5'h19, ~_mux_data_rawIn_normDist_T_1}; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_53 = _mux_data_rawIn_normDist_T_2 ? 6'h31 : _mux_data_rawIn_normDist_T_52; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_54 = _mux_data_rawIn_normDist_T_3 ? 6'h30 : _mux_data_rawIn_normDist_T_53; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_55 = _mux_data_rawIn_normDist_T_4 ? 6'h2F : _mux_data_rawIn_normDist_T_54; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_56 = _mux_data_rawIn_normDist_T_5 ? 6'h2E : _mux_data_rawIn_normDist_T_55; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_57 = _mux_data_rawIn_normDist_T_6 ? 6'h2D : _mux_data_rawIn_normDist_T_56; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_58 = _mux_data_rawIn_normDist_T_7 ? 6'h2C : _mux_data_rawIn_normDist_T_57; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_59 = _mux_data_rawIn_normDist_T_8 ? 6'h2B : _mux_data_rawIn_normDist_T_58; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_60 = _mux_data_rawIn_normDist_T_9 ? 6'h2A : _mux_data_rawIn_normDist_T_59; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_61 = _mux_data_rawIn_normDist_T_10 ? 6'h29 : _mux_data_rawIn_normDist_T_60; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_62 = _mux_data_rawIn_normDist_T_11 ? 6'h28 : _mux_data_rawIn_normDist_T_61; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_63 = _mux_data_rawIn_normDist_T_12 ? 6'h27 : _mux_data_rawIn_normDist_T_62; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_64 = _mux_data_rawIn_normDist_T_13 ? 6'h26 : _mux_data_rawIn_normDist_T_63; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_65 = _mux_data_rawIn_normDist_T_14 ? 6'h25 : _mux_data_rawIn_normDist_T_64; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_66 = _mux_data_rawIn_normDist_T_15 ? 6'h24 : _mux_data_rawIn_normDist_T_65; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_67 = _mux_data_rawIn_normDist_T_16 ? 6'h23 : _mux_data_rawIn_normDist_T_66; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_68 = _mux_data_rawIn_normDist_T_17 ? 6'h22 : _mux_data_rawIn_normDist_T_67; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_69 = _mux_data_rawIn_normDist_T_18 ? 6'h21 : _mux_data_rawIn_normDist_T_68; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_70 = _mux_data_rawIn_normDist_T_19 ? 6'h20 : _mux_data_rawIn_normDist_T_69; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_71 = _mux_data_rawIn_normDist_T_20 ? 6'h1F : _mux_data_rawIn_normDist_T_70; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_72 = _mux_data_rawIn_normDist_T_21 ? 6'h1E : _mux_data_rawIn_normDist_T_71; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_73 = _mux_data_rawIn_normDist_T_22 ? 6'h1D : _mux_data_rawIn_normDist_T_72; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_74 = _mux_data_rawIn_normDist_T_23 ? 6'h1C : _mux_data_rawIn_normDist_T_73; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_75 = _mux_data_rawIn_normDist_T_24 ? 6'h1B : _mux_data_rawIn_normDist_T_74; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_76 = _mux_data_rawIn_normDist_T_25 ? 6'h1A : _mux_data_rawIn_normDist_T_75; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_77 = _mux_data_rawIn_normDist_T_26 ? 6'h19 : _mux_data_rawIn_normDist_T_76; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_78 = _mux_data_rawIn_normDist_T_27 ? 6'h18 : _mux_data_rawIn_normDist_T_77; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_79 = _mux_data_rawIn_normDist_T_28 ? 6'h17 : _mux_data_rawIn_normDist_T_78; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_80 = _mux_data_rawIn_normDist_T_29 ? 6'h16 : _mux_data_rawIn_normDist_T_79; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_81 = _mux_data_rawIn_normDist_T_30 ? 6'h15 : _mux_data_rawIn_normDist_T_80; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_82 = _mux_data_rawIn_normDist_T_31 ? 6'h14 : _mux_data_rawIn_normDist_T_81; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_83 = _mux_data_rawIn_normDist_T_32 ? 6'h13 : _mux_data_rawIn_normDist_T_82; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_84 = _mux_data_rawIn_normDist_T_33 ? 6'h12 : _mux_data_rawIn_normDist_T_83; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_85 = _mux_data_rawIn_normDist_T_34 ? 6'h11 : _mux_data_rawIn_normDist_T_84; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_86 = _mux_data_rawIn_normDist_T_35 ? 6'h10 : _mux_data_rawIn_normDist_T_85; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_87 = _mux_data_rawIn_normDist_T_36 ? 6'hF : _mux_data_rawIn_normDist_T_86; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_88 = _mux_data_rawIn_normDist_T_37 ? 6'hE : _mux_data_rawIn_normDist_T_87; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_89 = _mux_data_rawIn_normDist_T_38 ? 6'hD : _mux_data_rawIn_normDist_T_88; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_90 = _mux_data_rawIn_normDist_T_39 ? 6'hC : _mux_data_rawIn_normDist_T_89; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_91 = _mux_data_rawIn_normDist_T_40 ? 6'hB : _mux_data_rawIn_normDist_T_90; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_92 = _mux_data_rawIn_normDist_T_41 ? 6'hA : _mux_data_rawIn_normDist_T_91; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_93 = _mux_data_rawIn_normDist_T_42 ? 6'h9 : _mux_data_rawIn_normDist_T_92; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_94 = _mux_data_rawIn_normDist_T_43 ? 6'h8 : _mux_data_rawIn_normDist_T_93; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_95 = _mux_data_rawIn_normDist_T_44 ? 6'h7 : _mux_data_rawIn_normDist_T_94; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_96 = _mux_data_rawIn_normDist_T_45 ? 6'h6 : _mux_data_rawIn_normDist_T_95; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_97 = _mux_data_rawIn_normDist_T_46 ? 6'h5 : _mux_data_rawIn_normDist_T_96; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_98 = _mux_data_rawIn_normDist_T_47 ? 6'h4 : _mux_data_rawIn_normDist_T_97; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_99 = _mux_data_rawIn_normDist_T_48 ? 6'h3 : _mux_data_rawIn_normDist_T_98; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_100 = _mux_data_rawIn_normDist_T_49 ? 6'h2 : _mux_data_rawIn_normDist_T_99; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_normDist_T_101 = _mux_data_rawIn_normDist_T_50 ? 6'h1 : _mux_data_rawIn_normDist_T_100; // @[Mux.scala:50:70]
wire [5:0] mux_data_rawIn_normDist = _mux_data_rawIn_normDist_T_51 ? 6'h0 : _mux_data_rawIn_normDist_T_101; // @[Mux.scala:50:70]
wire [114:0] _mux_data_rawIn_subnormFract_T = {63'h0, mux_data_rawIn_fractIn} << mux_data_rawIn_normDist; // @[Mux.scala:50:70]
wire [50:0] _mux_data_rawIn_subnormFract_T_1 = _mux_data_rawIn_subnormFract_T[50:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [51:0] mux_data_rawIn_subnormFract = {_mux_data_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [11:0] _mux_data_rawIn_adjustedExp_T = {6'h3F, ~mux_data_rawIn_normDist}; // @[Mux.scala:50:70]
wire [11:0] _mux_data_rawIn_adjustedExp_T_1 = mux_data_rawIn_isZeroExpIn ? _mux_data_rawIn_adjustedExp_T : {1'h0, mux_data_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _mux_data_rawIn_adjustedExp_T_2 = mux_data_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[package.scala:39:86]
wire [10:0] _mux_data_rawIn_adjustedExp_T_3 = {9'h100, _mux_data_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [12:0] _mux_data_rawIn_adjustedExp_T_4 = {1'h0, _mux_data_rawIn_adjustedExp_T_1} + {2'h0, _mux_data_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [11:0] mux_data_rawIn_adjustedExp = _mux_data_rawIn_adjustedExp_T_4[11:0]; // @[rawFloatFromFN.scala:57:9]
wire [11:0] _mux_data_rawIn_out_sExp_T = mux_data_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28]
wire mux_data_rawIn_isZero = mux_data_rawIn_isZeroExpIn & mux_data_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire mux_data_rawIn_isZero_0 = mux_data_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _mux_data_rawIn_isSpecial_T = mux_data_rawIn_adjustedExp[11:10]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire mux_data_rawIn_isSpecial = &_mux_data_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}]
wire _mux_data_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28]
wire _mux_data_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28]
wire _mux_data_T_9 = mux_data_rawIn_isNaN; // @[recFNFromFN.scala:49:20]
wire [12:0] _mux_data_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42]
wire [53:0] _mux_data_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27]
wire mux_data_rawIn_isInf; // @[rawFloatFromFN.scala:63:19]
wire [12:0] mux_data_rawIn_sExp; // @[rawFloatFromFN.scala:63:19]
wire [53:0] mux_data_rawIn_sig; // @[rawFloatFromFN.scala:63:19]
wire _mux_data_rawIn_out_isNaN_T = ~mux_data_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _mux_data_rawIn_out_isNaN_T_1 = mux_data_rawIn_isSpecial & _mux_data_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign mux_data_rawIn_isNaN = _mux_data_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _mux_data_rawIn_out_isInf_T = mux_data_rawIn_isSpecial & mux_data_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign mux_data_rawIn_isInf = _mux_data_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _mux_data_rawIn_out_sExp_T_1 = {1'h0, _mux_data_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}]
assign mux_data_rawIn_sExp = _mux_data_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _mux_data_rawIn_out_sig_T = ~mux_data_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _mux_data_rawIn_out_sig_T_1 = {1'h0, _mux_data_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [51:0] _mux_data_rawIn_out_sig_T_2 = mux_data_rawIn_isZeroExpIn ? mux_data_rawIn_subnormFract : mux_data_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _mux_data_rawIn_out_sig_T_3 = {_mux_data_rawIn_out_sig_T_1, _mux_data_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign mux_data_rawIn_sig = _mux_data_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _mux_data_T_7 = mux_data_rawIn_sExp[11:9]; // @[recFNFromFN.scala:48:50]
wire [2:0] _mux_data_T_8 = mux_data_rawIn_isZero_0 ? 3'h0 : _mux_data_T_7; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _mux_data_T_10 = {_mux_data_T_8[2:1], _mux_data_T_8[0] | _mux_data_T_9}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _mux_data_T_11 = {mux_data_rawIn_sign_0, _mux_data_T_10}; // @[recFNFromFN.scala:47:20, :48:76]
wire [8:0] _mux_data_T_12 = mux_data_rawIn_sExp[8:0]; // @[recFNFromFN.scala:50:23]
wire [12:0] _mux_data_T_13 = {_mux_data_T_11, _mux_data_T_12}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [51:0] _mux_data_T_14 = mux_data_rawIn_sig[51:0]; // @[recFNFromFN.scala:51:22]
wire [64:0] _mux_data_T_15 = {_mux_data_T_13, _mux_data_T_14}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire mux_data_rawIn_sign_1 = _mux_data_T_6[31]; // @[FPU.scala:431:23]
wire mux_data_rawIn_1_sign = mux_data_rawIn_sign_1; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [7:0] mux_data_rawIn_expIn_1 = _mux_data_T_6[30:23]; // @[FPU.scala:431:23]
wire [22:0] mux_data_rawIn_fractIn_1 = _mux_data_T_6[22:0]; // @[FPU.scala:431:23]
wire mux_data_rawIn_isZeroExpIn_1 = mux_data_rawIn_expIn_1 == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire mux_data_rawIn_isZeroFractIn_1 = mux_data_rawIn_fractIn_1 == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _mux_data_rawIn_normDist_T_102 = mux_data_rawIn_fractIn_1[0]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_103 = mux_data_rawIn_fractIn_1[1]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_104 = mux_data_rawIn_fractIn_1[2]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_105 = mux_data_rawIn_fractIn_1[3]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_106 = mux_data_rawIn_fractIn_1[4]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_107 = mux_data_rawIn_fractIn_1[5]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_108 = mux_data_rawIn_fractIn_1[6]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_109 = mux_data_rawIn_fractIn_1[7]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_110 = mux_data_rawIn_fractIn_1[8]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_111 = mux_data_rawIn_fractIn_1[9]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_112 = mux_data_rawIn_fractIn_1[10]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_113 = mux_data_rawIn_fractIn_1[11]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_114 = mux_data_rawIn_fractIn_1[12]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_115 = mux_data_rawIn_fractIn_1[13]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_116 = mux_data_rawIn_fractIn_1[14]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_117 = mux_data_rawIn_fractIn_1[15]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_118 = mux_data_rawIn_fractIn_1[16]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_119 = mux_data_rawIn_fractIn_1[17]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_120 = mux_data_rawIn_fractIn_1[18]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_121 = mux_data_rawIn_fractIn_1[19]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_122 = mux_data_rawIn_fractIn_1[20]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_123 = mux_data_rawIn_fractIn_1[21]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_124 = mux_data_rawIn_fractIn_1[22]; // @[rawFloatFromFN.scala:46:21]
wire [4:0] _mux_data_rawIn_normDist_T_125 = _mux_data_rawIn_normDist_T_103 ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_126 = _mux_data_rawIn_normDist_T_104 ? 5'h14 : _mux_data_rawIn_normDist_T_125; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_127 = _mux_data_rawIn_normDist_T_105 ? 5'h13 : _mux_data_rawIn_normDist_T_126; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_128 = _mux_data_rawIn_normDist_T_106 ? 5'h12 : _mux_data_rawIn_normDist_T_127; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_129 = _mux_data_rawIn_normDist_T_107 ? 5'h11 : _mux_data_rawIn_normDist_T_128; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_130 = _mux_data_rawIn_normDist_T_108 ? 5'h10 : _mux_data_rawIn_normDist_T_129; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_131 = _mux_data_rawIn_normDist_T_109 ? 5'hF : _mux_data_rawIn_normDist_T_130; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_132 = _mux_data_rawIn_normDist_T_110 ? 5'hE : _mux_data_rawIn_normDist_T_131; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_133 = _mux_data_rawIn_normDist_T_111 ? 5'hD : _mux_data_rawIn_normDist_T_132; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_134 = _mux_data_rawIn_normDist_T_112 ? 5'hC : _mux_data_rawIn_normDist_T_133; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_135 = _mux_data_rawIn_normDist_T_113 ? 5'hB : _mux_data_rawIn_normDist_T_134; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_136 = _mux_data_rawIn_normDist_T_114 ? 5'hA : _mux_data_rawIn_normDist_T_135; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_137 = _mux_data_rawIn_normDist_T_115 ? 5'h9 : _mux_data_rawIn_normDist_T_136; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_138 = _mux_data_rawIn_normDist_T_116 ? 5'h8 : _mux_data_rawIn_normDist_T_137; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_139 = _mux_data_rawIn_normDist_T_117 ? 5'h7 : _mux_data_rawIn_normDist_T_138; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_140 = _mux_data_rawIn_normDist_T_118 ? 5'h6 : _mux_data_rawIn_normDist_T_139; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_141 = _mux_data_rawIn_normDist_T_119 ? 5'h5 : _mux_data_rawIn_normDist_T_140; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_142 = _mux_data_rawIn_normDist_T_120 ? 5'h4 : _mux_data_rawIn_normDist_T_141; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_143 = _mux_data_rawIn_normDist_T_121 ? 5'h3 : _mux_data_rawIn_normDist_T_142; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_144 = _mux_data_rawIn_normDist_T_122 ? 5'h2 : _mux_data_rawIn_normDist_T_143; // @[Mux.scala:50:70]
wire [4:0] _mux_data_rawIn_normDist_T_145 = _mux_data_rawIn_normDist_T_123 ? 5'h1 : _mux_data_rawIn_normDist_T_144; // @[Mux.scala:50:70]
wire [4:0] mux_data_rawIn_normDist_1 = _mux_data_rawIn_normDist_T_124 ? 5'h0 : _mux_data_rawIn_normDist_T_145; // @[Mux.scala:50:70]
wire [53:0] _mux_data_rawIn_subnormFract_T_2 = {31'h0, mux_data_rawIn_fractIn_1} << mux_data_rawIn_normDist_1; // @[Mux.scala:50:70]
wire [21:0] _mux_data_rawIn_subnormFract_T_3 = _mux_data_rawIn_subnormFract_T_2[21:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [22:0] mux_data_rawIn_subnormFract_1 = {_mux_data_rawIn_subnormFract_T_3, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [8:0] _mux_data_rawIn_adjustedExp_T_5 = {4'hF, ~mux_data_rawIn_normDist_1}; // @[Mux.scala:50:70]
wire [8:0] _mux_data_rawIn_adjustedExp_T_6 = mux_data_rawIn_isZeroExpIn_1 ? _mux_data_rawIn_adjustedExp_T_5 : {1'h0, mux_data_rawIn_expIn_1}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _mux_data_rawIn_adjustedExp_T_7 = mux_data_rawIn_isZeroExpIn_1 ? 2'h2 : 2'h1; // @[package.scala:39:86]
wire [7:0] _mux_data_rawIn_adjustedExp_T_8 = {6'h20, _mux_data_rawIn_adjustedExp_T_7}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [9:0] _mux_data_rawIn_adjustedExp_T_9 = {1'h0, _mux_data_rawIn_adjustedExp_T_6} + {2'h0, _mux_data_rawIn_adjustedExp_T_8}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [8:0] mux_data_rawIn_adjustedExp_1 = _mux_data_rawIn_adjustedExp_T_9[8:0]; // @[rawFloatFromFN.scala:57:9]
wire [8:0] _mux_data_rawIn_out_sExp_T_2 = mux_data_rawIn_adjustedExp_1; // @[rawFloatFromFN.scala:57:9, :68:28]
wire mux_data_rawIn_isZero_1 = mux_data_rawIn_isZeroExpIn_1 & mux_data_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire mux_data_rawIn_1_isZero = mux_data_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _mux_data_rawIn_isSpecial_T_1 = mux_data_rawIn_adjustedExp_1[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire mux_data_rawIn_isSpecial_1 = &_mux_data_rawIn_isSpecial_T_1; // @[rawFloatFromFN.scala:61:{32,57}]
wire _mux_data_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:64:28]
wire _mux_data_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:65:28]
wire _mux_data_T_18 = mux_data_rawIn_1_isNaN; // @[recFNFromFN.scala:49:20]
wire [9:0] _mux_data_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:68:42]
wire [24:0] _mux_data_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:70:27]
wire mux_data_rawIn_1_isInf; // @[rawFloatFromFN.scala:63:19]
wire [9:0] mux_data_rawIn_1_sExp; // @[rawFloatFromFN.scala:63:19]
wire [24:0] mux_data_rawIn_1_sig; // @[rawFloatFromFN.scala:63:19]
wire _mux_data_rawIn_out_isNaN_T_2 = ~mux_data_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _mux_data_rawIn_out_isNaN_T_3 = mux_data_rawIn_isSpecial_1 & _mux_data_rawIn_out_isNaN_T_2; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign mux_data_rawIn_1_isNaN = _mux_data_rawIn_out_isNaN_T_3; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _mux_data_rawIn_out_isInf_T_1 = mux_data_rawIn_isSpecial_1 & mux_data_rawIn_isZeroFractIn_1; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign mux_data_rawIn_1_isInf = _mux_data_rawIn_out_isInf_T_1; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _mux_data_rawIn_out_sExp_T_3 = {1'h0, _mux_data_rawIn_out_sExp_T_2}; // @[rawFloatFromFN.scala:68:{28,42}]
assign mux_data_rawIn_1_sExp = _mux_data_rawIn_out_sExp_T_3; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _mux_data_rawIn_out_sig_T_4 = ~mux_data_rawIn_isZero_1; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _mux_data_rawIn_out_sig_T_5 = {1'h0, _mux_data_rawIn_out_sig_T_4}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [22:0] _mux_data_rawIn_out_sig_T_6 = mux_data_rawIn_isZeroExpIn_1 ? mux_data_rawIn_subnormFract_1 : mux_data_rawIn_fractIn_1; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _mux_data_rawIn_out_sig_T_7 = {_mux_data_rawIn_out_sig_T_5, _mux_data_rawIn_out_sig_T_6}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign mux_data_rawIn_1_sig = _mux_data_rawIn_out_sig_T_7; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _mux_data_T_16 = mux_data_rawIn_1_sExp[8:6]; // @[recFNFromFN.scala:48:50]
wire [2:0] _mux_data_T_17 = mux_data_rawIn_1_isZero ? 3'h0 : _mux_data_T_16; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _mux_data_T_19 = {_mux_data_T_17[2:1], _mux_data_T_17[0] | _mux_data_T_18}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _mux_data_T_20 = {mux_data_rawIn_1_sign, _mux_data_T_19}; // @[recFNFromFN.scala:47:20, :48:76]
wire [5:0] _mux_data_T_21 = mux_data_rawIn_1_sExp[5:0]; // @[recFNFromFN.scala:50:23]
wire [9:0] _mux_data_T_22 = {_mux_data_T_20, _mux_data_T_21}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [22:0] _mux_data_T_23 = mux_data_rawIn_1_sig[22:0]; // @[recFNFromFN.scala:51:22]
wire [32:0] _mux_data_T_24 = {_mux_data_T_22, _mux_data_T_23}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire mux_data_rawIn_sign_2 = _mux_data_T_6[15]; // @[FPU.scala:431:23]
wire mux_data_rawIn_2_sign = mux_data_rawIn_sign_2; // @[rawFloatFromFN.scala:44:18, :63:19]
wire [4:0] mux_data_rawIn_expIn_2 = _mux_data_T_6[14:10]; // @[FPU.scala:431:23]
wire [9:0] mux_data_rawIn_fractIn_2 = _mux_data_T_6[9:0]; // @[FPU.scala:431:23]
wire mux_data_rawIn_isZeroExpIn_2 = mux_data_rawIn_expIn_2 == 5'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire mux_data_rawIn_isZeroFractIn_2 = mux_data_rawIn_fractIn_2 == 10'h0; // @[rawFloatFromFN.scala:46:21, :49:34]
wire _mux_data_rawIn_normDist_T_146 = mux_data_rawIn_fractIn_2[0]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_147 = mux_data_rawIn_fractIn_2[1]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_148 = mux_data_rawIn_fractIn_2[2]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_149 = mux_data_rawIn_fractIn_2[3]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_150 = mux_data_rawIn_fractIn_2[4]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_151 = mux_data_rawIn_fractIn_2[5]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_152 = mux_data_rawIn_fractIn_2[6]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_153 = mux_data_rawIn_fractIn_2[7]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_154 = mux_data_rawIn_fractIn_2[8]; // @[rawFloatFromFN.scala:46:21]
wire _mux_data_rawIn_normDist_T_155 = mux_data_rawIn_fractIn_2[9]; // @[rawFloatFromFN.scala:46:21]
wire [3:0] _mux_data_rawIn_normDist_T_156 = {3'h4, ~_mux_data_rawIn_normDist_T_147}; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_157 = _mux_data_rawIn_normDist_T_148 ? 4'h7 : _mux_data_rawIn_normDist_T_156; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_158 = _mux_data_rawIn_normDist_T_149 ? 4'h6 : _mux_data_rawIn_normDist_T_157; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_159 = _mux_data_rawIn_normDist_T_150 ? 4'h5 : _mux_data_rawIn_normDist_T_158; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_160 = _mux_data_rawIn_normDist_T_151 ? 4'h4 : _mux_data_rawIn_normDist_T_159; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_161 = _mux_data_rawIn_normDist_T_152 ? 4'h3 : _mux_data_rawIn_normDist_T_160; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_162 = _mux_data_rawIn_normDist_T_153 ? 4'h2 : _mux_data_rawIn_normDist_T_161; // @[Mux.scala:50:70]
wire [3:0] _mux_data_rawIn_normDist_T_163 = _mux_data_rawIn_normDist_T_154 ? 4'h1 : _mux_data_rawIn_normDist_T_162; // @[Mux.scala:50:70]
wire [3:0] mux_data_rawIn_normDist_2 = _mux_data_rawIn_normDist_T_155 ? 4'h0 : _mux_data_rawIn_normDist_T_163; // @[Mux.scala:50:70]
wire [24:0] _mux_data_rawIn_subnormFract_T_4 = {15'h0, mux_data_rawIn_fractIn_2} << mux_data_rawIn_normDist_2; // @[Mux.scala:50:70]
wire [8:0] _mux_data_rawIn_subnormFract_T_5 = _mux_data_rawIn_subnormFract_T_4[8:0]; // @[rawFloatFromFN.scala:52:{33,46}]
wire [9:0] mux_data_rawIn_subnormFract_2 = {_mux_data_rawIn_subnormFract_T_5, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}]
wire [5:0] _mux_data_rawIn_adjustedExp_T_10 = {2'h3, ~mux_data_rawIn_normDist_2}; // @[Mux.scala:50:70]
wire [5:0] _mux_data_rawIn_adjustedExp_T_11 = mux_data_rawIn_isZeroExpIn_2 ? _mux_data_rawIn_adjustedExp_T_10 : {1'h0, mux_data_rawIn_expIn_2}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18]
wire [1:0] _mux_data_rawIn_adjustedExp_T_12 = mux_data_rawIn_isZeroExpIn_2 ? 2'h2 : 2'h1; // @[package.scala:39:86]
wire [4:0] _mux_data_rawIn_adjustedExp_T_13 = {3'h4, _mux_data_rawIn_adjustedExp_T_12}; // @[rawFloatFromFN.scala:58:{9,14}]
wire [6:0] _mux_data_rawIn_adjustedExp_T_14 = {1'h0, _mux_data_rawIn_adjustedExp_T_11} + {2'h0, _mux_data_rawIn_adjustedExp_T_13}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9]
wire [5:0] mux_data_rawIn_adjustedExp_2 = _mux_data_rawIn_adjustedExp_T_14[5:0]; // @[rawFloatFromFN.scala:57:9]
wire [5:0] _mux_data_rawIn_out_sExp_T_4 = mux_data_rawIn_adjustedExp_2; // @[rawFloatFromFN.scala:57:9, :68:28]
wire mux_data_rawIn_isZero_2 = mux_data_rawIn_isZeroExpIn_2 & mux_data_rawIn_isZeroFractIn_2; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30]
wire mux_data_rawIn_2_isZero = mux_data_rawIn_isZero_2; // @[rawFloatFromFN.scala:60:30, :63:19]
wire [1:0] _mux_data_rawIn_isSpecial_T_2 = mux_data_rawIn_adjustedExp_2[5:4]; // @[rawFloatFromFN.scala:57:9, :61:32]
wire mux_data_rawIn_isSpecial_2 = &_mux_data_rawIn_isSpecial_T_2; // @[rawFloatFromFN.scala:61:{32,57}]
wire _mux_data_rawIn_out_isNaN_T_5; // @[rawFloatFromFN.scala:64:28]
wire _mux_data_rawIn_out_isInf_T_2; // @[rawFloatFromFN.scala:65:28]
wire _mux_data_T_27 = mux_data_rawIn_2_isNaN; // @[recFNFromFN.scala:49:20]
wire [6:0] _mux_data_rawIn_out_sExp_T_5; // @[rawFloatFromFN.scala:68:42]
wire [11:0] _mux_data_rawIn_out_sig_T_11; // @[rawFloatFromFN.scala:70:27]
wire mux_data_rawIn_2_isInf; // @[rawFloatFromFN.scala:63:19]
wire [6:0] mux_data_rawIn_2_sExp; // @[rawFloatFromFN.scala:63:19]
wire [11:0] mux_data_rawIn_2_sig; // @[rawFloatFromFN.scala:63:19]
wire _mux_data_rawIn_out_isNaN_T_4 = ~mux_data_rawIn_isZeroFractIn_2; // @[rawFloatFromFN.scala:49:34, :64:31]
assign _mux_data_rawIn_out_isNaN_T_5 = mux_data_rawIn_isSpecial_2 & _mux_data_rawIn_out_isNaN_T_4; // @[rawFloatFromFN.scala:61:57, :64:{28,31}]
assign mux_data_rawIn_2_isNaN = _mux_data_rawIn_out_isNaN_T_5; // @[rawFloatFromFN.scala:63:19, :64:28]
assign _mux_data_rawIn_out_isInf_T_2 = mux_data_rawIn_isSpecial_2 & mux_data_rawIn_isZeroFractIn_2; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28]
assign mux_data_rawIn_2_isInf = _mux_data_rawIn_out_isInf_T_2; // @[rawFloatFromFN.scala:63:19, :65:28]
assign _mux_data_rawIn_out_sExp_T_5 = {1'h0, _mux_data_rawIn_out_sExp_T_4}; // @[rawFloatFromFN.scala:68:{28,42}]
assign mux_data_rawIn_2_sExp = _mux_data_rawIn_out_sExp_T_5; // @[rawFloatFromFN.scala:63:19, :68:42]
wire _mux_data_rawIn_out_sig_T_8 = ~mux_data_rawIn_isZero_2; // @[rawFloatFromFN.scala:60:30, :70:19]
wire [1:0] _mux_data_rawIn_out_sig_T_9 = {1'h0, _mux_data_rawIn_out_sig_T_8}; // @[rawFloatFromFN.scala:70:{16,19}]
wire [9:0] _mux_data_rawIn_out_sig_T_10 = mux_data_rawIn_isZeroExpIn_2 ? mux_data_rawIn_subnormFract_2 : mux_data_rawIn_fractIn_2; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33]
assign _mux_data_rawIn_out_sig_T_11 = {_mux_data_rawIn_out_sig_T_9, _mux_data_rawIn_out_sig_T_10}; // @[rawFloatFromFN.scala:70:{16,27,33}]
assign mux_data_rawIn_2_sig = _mux_data_rawIn_out_sig_T_11; // @[rawFloatFromFN.scala:63:19, :70:27]
wire [2:0] _mux_data_T_25 = mux_data_rawIn_2_sExp[5:3]; // @[recFNFromFN.scala:48:50]
wire [2:0] _mux_data_T_26 = mux_data_rawIn_2_isZero ? 3'h0 : _mux_data_T_25; // @[recFNFromFN.scala:48:{15,50}]
wire [2:0] _mux_data_T_28 = {_mux_data_T_26[2:1], _mux_data_T_26[0] | _mux_data_T_27}; // @[recFNFromFN.scala:48:{15,76}, :49:20]
wire [3:0] _mux_data_T_29 = {mux_data_rawIn_2_sign, _mux_data_T_28}; // @[recFNFromFN.scala:47:20, :48:76]
wire [2:0] _mux_data_T_30 = mux_data_rawIn_2_sExp[2:0]; // @[recFNFromFN.scala:50:23]
wire [6:0] _mux_data_T_31 = {_mux_data_T_29, _mux_data_T_30}; // @[recFNFromFN.scala:47:20, :49:45, :50:23]
wire [9:0] _mux_data_T_32 = mux_data_rawIn_2_sig[9:0]; // @[recFNFromFN.scala:51:22]
wire [16:0] _mux_data_T_33 = {_mux_data_T_31, _mux_data_T_32}; // @[recFNFromFN.scala:49:45, :50:41, :51:22]
wire [3:0] _mux_data_swizzledNaN_T = _mux_data_T_24[32:29]; // @[FPU.scala:337:8]
wire [6:0] _mux_data_swizzledNaN_T_1 = _mux_data_T_24[22:16]; // @[FPU.scala:338:8]
wire [6:0] _mux_data_swizzledNaN_T_5 = _mux_data_T_24[22:16]; // @[FPU.scala:338:8, :341:8]
wire _mux_data_swizzledNaN_T_2 = &_mux_data_swizzledNaN_T_1; // @[FPU.scala:338:{8,42}]
wire [3:0] _mux_data_swizzledNaN_T_3 = _mux_data_T_24[27:24]; // @[FPU.scala:339:8]
wire _mux_data_swizzledNaN_T_4 = _mux_data_T_33[15]; // @[FPU.scala:340:8]
wire _mux_data_swizzledNaN_T_6 = _mux_data_T_33[16]; // @[FPU.scala:342:8]
wire [14:0] _mux_data_swizzledNaN_T_7 = _mux_data_T_33[14:0]; // @[FPU.scala:343:8]
wire [7:0] mux_data_swizzledNaN_lo_hi = {_mux_data_swizzledNaN_T_5, _mux_data_swizzledNaN_T_6}; // @[FPU.scala:336:26, :341:8, :342:8]
wire [22:0] mux_data_swizzledNaN_lo = {mux_data_swizzledNaN_lo_hi, _mux_data_swizzledNaN_T_7}; // @[FPU.scala:336:26, :343:8]
wire [4:0] mux_data_swizzledNaN_hi_lo = {_mux_data_swizzledNaN_T_3, _mux_data_swizzledNaN_T_4}; // @[FPU.scala:336:26, :339:8, :340:8]
wire [4:0] mux_data_swizzledNaN_hi_hi = {_mux_data_swizzledNaN_T, _mux_data_swizzledNaN_T_2}; // @[FPU.scala:336:26, :337:8, :338:42]
wire [9:0] mux_data_swizzledNaN_hi = {mux_data_swizzledNaN_hi_hi, mux_data_swizzledNaN_hi_lo}; // @[FPU.scala:336:26]
wire [32:0] mux_data_swizzledNaN = {mux_data_swizzledNaN_hi, mux_data_swizzledNaN_lo}; // @[FPU.scala:336:26]
wire [2:0] _mux_data_T_34 = _mux_data_T_24[31:29]; // @[FPU.scala:249:25]
wire _mux_data_T_35 = &_mux_data_T_34; // @[FPU.scala:249:{25,56}]
wire [32:0] _mux_data_T_36 = _mux_data_T_35 ? mux_data_swizzledNaN : _mux_data_T_24; // @[FPU.scala:249:56, :336:26, :344:8]
wire [3:0] _mux_data_swizzledNaN_T_8 = _mux_data_T_15[64:61]; // @[FPU.scala:337:8]
wire [19:0] _mux_data_swizzledNaN_T_9 = _mux_data_T_15[51:32]; // @[FPU.scala:338:8]
wire [19:0] _mux_data_swizzledNaN_T_13 = _mux_data_T_15[51:32]; // @[FPU.scala:338:8, :341:8]
wire _mux_data_swizzledNaN_T_10 = &_mux_data_swizzledNaN_T_9; // @[FPU.scala:338:{8,42}]
wire [6:0] _mux_data_swizzledNaN_T_11 = _mux_data_T_15[59:53]; // @[FPU.scala:339:8]
wire _mux_data_swizzledNaN_T_12 = _mux_data_T_36[31]; // @[FPU.scala:340:8, :344:8]
wire _mux_data_swizzledNaN_T_14 = _mux_data_T_36[32]; // @[FPU.scala:342:8, :344:8]
wire [30:0] _mux_data_swizzledNaN_T_15 = _mux_data_T_36[30:0]; // @[FPU.scala:343:8, :344:8]
wire [20:0] mux_data_swizzledNaN_lo_hi_1 = {_mux_data_swizzledNaN_T_13, _mux_data_swizzledNaN_T_14}; // @[FPU.scala:336:26, :341:8, :342:8]
wire [51:0] mux_data_swizzledNaN_lo_1 = {mux_data_swizzledNaN_lo_hi_1, _mux_data_swizzledNaN_T_15}; // @[FPU.scala:336:26, :343:8]
wire [7:0] mux_data_swizzledNaN_hi_lo_1 = {_mux_data_swizzledNaN_T_11, _mux_data_swizzledNaN_T_12}; // @[FPU.scala:336:26, :339:8, :340:8]
wire [4:0] mux_data_swizzledNaN_hi_hi_1 = {_mux_data_swizzledNaN_T_8, _mux_data_swizzledNaN_T_10}; // @[FPU.scala:336:26, :337:8, :338:42]
wire [12:0] mux_data_swizzledNaN_hi_1 = {mux_data_swizzledNaN_hi_hi_1, mux_data_swizzledNaN_hi_lo_1}; // @[FPU.scala:336:26]
wire [64:0] mux_data_swizzledNaN_1 = {mux_data_swizzledNaN_hi_1, mux_data_swizzledNaN_lo_1}; // @[FPU.scala:336:26]
wire [2:0] _mux_data_T_37 = _mux_data_T_15[63:61]; // @[FPU.scala:249:25]
wire _mux_data_T_38 = &_mux_data_T_37; // @[FPU.scala:249:{25,56}]
wire [64:0] _mux_data_T_39 = _mux_data_T_38 ? mux_data_swizzledNaN_1 : _mux_data_T_15; // @[FPU.scala:249:56, :336:26, :344:8]
wire [63:0] intValue_res; // @[FPU.scala:542:26]
wire [63:0] intValue = intValue_res; // @[FPU.scala:542:26, :549:9]
wire [31:0] intValue_smallInt = in_bits_in1[31:0]; // @[Valid.scala:135:21]
wire [31:0] _intValue_res_T_3 = intValue_smallInt; // @[FPU.scala:544:33, :546:60]
wire _intValue_T = in_bits_typ[1]; // @[Valid.scala:135:21]
wire _intValue_T_1 = ~_intValue_T; // @[package.scala:163:13]
wire _intValue_res_T_1 = in_bits_typ[0]; // @[Valid.scala:135:21]
wire _i2fResults_i2f_io_signedIn_T = in_bits_typ[0]; // @[Valid.scala:135:21]
wire _i2fResults_i2f_io_signedIn_T_2 = in_bits_typ[0]; // @[Valid.scala:135:21]
wire _i2fResults_i2f_io_signedIn_T_4 = in_bits_typ[0]; // @[Valid.scala:135:21]
wire [32:0] _intValue_res_T_2 = {1'h0, intValue_smallInt}; // @[FPU.scala:544:33, :546:45]
wire [32:0] _intValue_res_T_4 = _intValue_res_T_1 ? _intValue_res_T_2 : {_intValue_res_T_3[31], _intValue_res_T_3}; // @[FPU.scala:546:{19,31,45,60}]
assign intValue_res = _intValue_T_1 ? {{31{_intValue_res_T_4[32]}}, _intValue_res_T_4} : _intValue_res_T; // @[FPU.scala:542:{26,39}, :545:{57,66}, :546:{13,19}]
wire _i2fResults_i2f_io_signedIn_T_1 = ~_i2fResults_i2f_io_signedIn_T; // @[FPU.scala:557:{26,38}]
wire _i2fResults_i2f_io_signedIn_T_3 = ~_i2fResults_i2f_io_signedIn_T_2; // @[FPU.scala:557:{26,38}]
wire [32:0] i2fResults_maskedNaN = _i2fResults_i2f_1_io_out & 33'h1EF7FFFFF; // @[FPU.scala:413:25, :556:23]
wire [2:0] _i2fResults_T = _i2fResults_i2f_1_io_out[31:29]; // @[FPU.scala:249:25, :556:23]
wire _i2fResults_T_1 = &_i2fResults_T; // @[FPU.scala:249:{25,56}]
wire [32:0] i2fResults_1_1 = _i2fResults_T_1 ? i2fResults_maskedNaN : _i2fResults_i2f_1_io_out; // @[FPU.scala:249:56, :413:25, :414:10, :556:23]
wire _i2fResults_i2f_io_signedIn_T_5 = ~_i2fResults_i2f_io_signedIn_T_4; // @[FPU.scala:557:{26,38}]
wire [64:0] i2fResults_maskedNaN_1 = _i2fResults_i2f_2_io_out & 65'h1EFEFFFFFFFFFFFFF; // @[FPU.scala:413:25, :556:23]
wire [2:0] _i2fResults_T_2 = _i2fResults_i2f_2_io_out[63:61]; // @[FPU.scala:249:25, :556:23]
wire _i2fResults_T_3 = &_i2fResults_T_2; // @[FPU.scala:249:{25,56}]
wire [64:0] i2fResults_2_1 = _i2fResults_T_3 ? i2fResults_maskedNaN_1 : _i2fResults_i2f_2_io_out; // @[FPU.scala:249:56, :413:25, :414:10, :556:23]
wire [47:0] _dataPadded_T = i2fResults_2_1[64:17]; // @[FPU.scala:414:10, :565:55]
wire [64:0] dataPadded_0 = {_dataPadded_T, _i2fResults_i2f_io_out}; // @[FPU.scala:556:23, :565:{44,55}]
wire [31:0] _dataPadded_T_1 = i2fResults_2_1[64:33]; // @[FPU.scala:414:10, :565:55]
wire [64:0] dataPadded_1 = {_dataPadded_T_1, i2fResults_1_1}; // @[FPU.scala:414:10, :565:{44,55}]
wire [64:0] _mux_data_T_41 = _mux_data_T_40 ? dataPadded_1 : dataPadded_0; // @[package.scala:39:{76,86}]
wire [64:0] _mux_data_T_43 = _mux_data_T_42 ? i2fResults_2_1 : _mux_data_T_41; // @[package.scala:39:{76,86}]
wire _mux_data_T_44 = &in_bits_typeTagIn; // @[Valid.scala:135:21]
wire [64:0] _mux_data_T_45 = _mux_data_T_44 ? i2fResults_2_1 : _mux_data_T_43; // @[package.scala:39:{76,86}]
assign mux_data = in_bits_wflags ? _mux_data_T_45 : _mux_data_T_39; // @[Valid.scala:135:21]
wire [4:0] _mux_exc_T_1 = _mux_exc_T ? _i2fResults_i2f_1_io_exceptionFlags : _i2fResults_i2f_io_exceptionFlags; // @[package.scala:39:{76,86}]
wire [4:0] _mux_exc_T_3 = _mux_exc_T_2 ? _i2fResults_i2f_2_io_exceptionFlags : _mux_exc_T_1; // @[package.scala:39:{76,86}]
wire _mux_exc_T_4 = &in_bits_typeTagIn; // @[Valid.scala:135:21]
wire [4:0] _mux_exc_T_5 = _mux_exc_T_4 ? _i2fResults_i2f_2_io_exceptionFlags : _mux_exc_T_3; // @[package.scala:39:{76,86}]
assign mux_exc = in_bits_wflags ? _mux_exc_T_5 : 5'h0; // @[Valid.scala:135:21]
reg io_out_pipe_v; // @[Valid.scala:141:24]
assign io_out_pipe_out_valid = io_out_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [64:0] io_out_pipe_b_data; // @[Valid.scala:142:26]
assign io_out_pipe_out_bits_data = io_out_pipe_b_data; // @[Valid.scala:135:21, :142:26]
reg [4:0] io_out_pipe_b_exc; // @[Valid.scala:142:26]
assign io_out_pipe_out_bits_exc = io_out_pipe_b_exc; // @[Valid.scala:135:21, :142:26]
assign io_out_valid = io_out_pipe_out_valid; // @[Valid.scala:135:21]
assign io_out_bits_data_0 = io_out_pipe_out_bits_data; // @[Valid.scala:135:21]
assign io_out_bits_exc_0 = io_out_pipe_out_bits_exc; // @[Valid.scala:135:21]
always @(posedge clock) begin // @[FPU.scala:528:7]
if (reset) begin // @[FPU.scala:528:7]
in_pipe_v <= 1'h0; // @[Valid.scala:141:24]
io_out_pipe_v <= 1'h0; // @[Valid.scala:141:24]
end
else begin // @[FPU.scala:528:7]
in_pipe_v <= io_in_valid_0; // @[Valid.scala:141:24]
io_out_pipe_v <= in_valid; // @[Valid.scala:135:21, :141:24]
end
if (io_in_valid_0) begin // @[FPU.scala:528:7]
in_pipe_b_ldst <= io_in_bits_ldst_0; // @[Valid.scala:142:26]
in_pipe_b_wen <= io_in_bits_wen_0; // @[Valid.scala:142:26]
in_pipe_b_ren1 <= io_in_bits_ren1_0; // @[Valid.scala:142:26]
in_pipe_b_ren2 <= io_in_bits_ren2_0; // @[Valid.scala:142:26]
in_pipe_b_ren3 <= io_in_bits_ren3_0; // @[Valid.scala:142:26]
in_pipe_b_swap12 <= io_in_bits_swap12_0; // @[Valid.scala:142:26]
in_pipe_b_swap23 <= io_in_bits_swap23_0; // @[Valid.scala:142:26]
in_pipe_b_typeTagIn <= io_in_bits_typeTagIn_0; // @[Valid.scala:142:26]
in_pipe_b_typeTagOut <= io_in_bits_typeTagOut_0; // @[Valid.scala:142:26]
in_pipe_b_fromint <= io_in_bits_fromint_0; // @[Valid.scala:142:26]
in_pipe_b_toint <= io_in_bits_toint_0; // @[Valid.scala:142:26]
in_pipe_b_fastpipe <= io_in_bits_fastpipe_0; // @[Valid.scala:142:26]
in_pipe_b_fma <= io_in_bits_fma_0; // @[Valid.scala:142:26]
in_pipe_b_div <= io_in_bits_div_0; // @[Valid.scala:142:26]
in_pipe_b_sqrt <= io_in_bits_sqrt_0; // @[Valid.scala:142:26]
in_pipe_b_wflags <= io_in_bits_wflags_0; // @[Valid.scala:142:26]
in_pipe_b_vec <= io_in_bits_vec_0; // @[Valid.scala:142:26]
in_pipe_b_rm <= io_in_bits_rm_0; // @[Valid.scala:142:26]
in_pipe_b_typ <= io_in_bits_typ_0; // @[Valid.scala:142:26]
in_pipe_b_in1 <= io_in_bits_in1_0; // @[Valid.scala:142:26]
end
if (in_valid) begin // @[Valid.scala:135:21]
io_out_pipe_b_data <= mux_data; // @[Valid.scala:142:26]
io_out_pipe_b_exc <= mux_exc; // @[Valid.scala:142:26]
end
  end // always @(posedge)
INToRecFN_i64_e5_s11_7 i2fResults_i2f ( // @[FPU.scala:556:23]
.io_signedIn (_i2fResults_i2f_io_signedIn_T_1), // @[FPU.scala:557:26]
.io_in (intValue), // @[FPU.scala:549:9]
.io_roundingMode (in_bits_rm), // @[Valid.scala:135:21]
.io_out (_i2fResults_i2f_io_out),
.io_exceptionFlags (_i2fResults_i2f_io_exceptionFlags)
); // @[FPU.scala:556:23]
INToRecFN_i64_e8_s24_7 i2fResults_i2f_1 ( // @[FPU.scala:556:23]
.io_signedIn (_i2fResults_i2f_io_signedIn_T_3), // @[FPU.scala:557:26]
.io_in (intValue), // @[FPU.scala:549:9]
.io_roundingMode (in_bits_rm), // @[Valid.scala:135:21]
.io_out (_i2fResults_i2f_1_io_out),
.io_exceptionFlags (_i2fResults_i2f_1_io_exceptionFlags)
); // @[FPU.scala:556:23]
INToRecFN_i64_e11_s53_7 i2fResults_i2f_2 ( // @[FPU.scala:556:23]
.io_signedIn (_i2fResults_i2f_io_signedIn_T_5), // @[FPU.scala:557:26]
.io_in (intValue), // @[FPU.scala:549:9]
.io_roundingMode (in_bits_rm), // @[Valid.scala:135:21]
.io_out (_i2fResults_i2f_2_io_out),
.io_exceptionFlags (_i2fResults_i2f_2_io_exceptionFlags)
); // @[FPU.scala:556:23]
assign io_out_bits_data = io_out_bits_data_0; // @[FPU.scala:528:7]
assign io_out_bits_exc = io_out_bits_exc_0; // @[FPU.scala:528:7]
endmodule |
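
A short aside on the module above: judging by the source annotations, this appears to be the emitted logic for rocket-chip's integer-to-float datapath. The cones of mux_data_rawIn_* wires recode the raw input bits as an IEEE double/single/half into the 65/33/17-bit recoded formats (rawFloatFromFN/recFNFromFN, plus the FPU.scala NaN "swizzle" used when boxing narrower results), while the three INToRecFN instances perform the actual integer-to-float conversions and are muxed by typeTagIn when wflags is set. The Chisel below is a minimal illustrative sketch of that structure, assuming the Berkeley hardfloat helpers; the module and port names are invented for the example and only the double-precision leg is shown.

import chisel3._
import hardfloat._

// Sketch only: one "move" leg (recode raw bits) and one "convert" leg (INToRecFN),
// selected by wflags, mirroring the structure of the generated module above.
class IntToRecFNSketch extends Module {
  val io = IO(new Bundle {
    val in1    = Input(UInt(64.W))  // integer operand, or raw FP bits for a move
    val typ    = Input(UInt(2.W))   // typ(0): 0 = signed source, 1 = unsigned source
    val rm     = Input(UInt(3.W))   // rounding mode
    val wflags = Input(Bool())      // true for conversions, false for moves
    val out    = Output(UInt(65.W)) // double-precision recoded result
  })

  // Move path: reinterpret the 64 input bits as an IEEE double and recode it.
  val movD = recFNFromFN(11, 53, io.in1)

  // Convert path: the generated module instantiates one INToRecFN per precision
  // (H/S/D) and muxes the boxed results on typeTagIn; only D is shown here.
  val i2f = Module(new INToRecFN(64, 11, 53))
  i2f.io.signedIn       := ~io.typ(0)
  i2f.io.in             := io.in1
  i2f.io.roundingMode   := io.rm
  i2f.io.detectTininess := consts.tininess_afterRounding

  io.out := Mux(io.wflags, i2f.io.out.asUInt, movD)
}
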
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
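  // In directions other than Monitor the two helpers differ in polarity: monAssert above
  // emits the property in the given direction, while assume below flips it
  // (monitorDir.flip), so the same checks become environment assumptions when the
  // monitor is used to constrain a formal testbench.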
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
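  // visible() holds when every client that could own this source ID also declares the
  // address within its visibility window; clients whose sourceId range excludes the
  // source are discharged by the first disjunct.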
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
  "Request message should not be sent with a source ID for which a response message " +
  "is already pending (not received until the current cycle) for a prior request message " +
  "with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
  "Response message should be accepted with a source ID only if a request message with the " +
  "same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
  "Response message should be sent with a source ID only if a request message with the " +
  "same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
  "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
  "message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
  "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
  "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
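// A minimal usage sketch (hypothetical module; names are illustrative only): DecoupledHelper
// gathers several ready/valid conditions, and each endpoint excludes its own signal so the
// fire condition does not feed back into it.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready)
  io.in.ready  := helper.fire(io.in.valid)   // ready when everything except our own valid holds
  io.out.valid := helper.fire(io.out.ready)  // valid when everything except our own ready holds
  io.out.bits  := io.in.bits
}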
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
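// A small sketch (hypothetical values) of MuxTLookup: a cascade of MuxTs that selects a tuple
// of values by key, falling back to the default when no key matches.
class MuxTLookupExample extends Module {
  val io = IO(new Bundle {
    val sel  = Input(UInt(2.W))
    val size = Output(UInt(4.W))
    val ok   = Output(Bool())
  })
  val (size, ok) = MuxTLookup(io.sel, (0.U(4.W), false.B), Seq(
    1.U -> (4.U(4.W), true.B),
    2.U -> (8.U(4.W), true.B)))
  io.size := size
  io.ok   := ok
}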
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
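// A brief sketch (hypothetical wrapper) exercising MaskGen for a 4-byte beat: the result has
// one bit per byte lane, covering the 2^lgSize bytes selected by the low address bits.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(2.W))
    val lgSize  = Input(UInt(2.W))
    val mask    = Output(UInt(4.W))
  })
  io.mask := MaskGen(io.addr_lo, io.lgSize, beatBytes = 4)
}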
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
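// A minimal sketch (hypothetical module and plusarg names) of the helpers above: reading a
// runtime +max_cycles=N value and arming the watchdog-style timeout assertion.
class PlusArgExample extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  val limit = PlusArg("max_cycles", docstring = "Stop simulation after this many cycles (0 = never)")
  PlusArg.timeout("watchdog_cycles", docstring = "Kill simulation after this many cycles")(cycles)
  when (limit =/= 0.U && cycles >= limit) { stop() }
}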
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
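  // A tiny sketch (hypothetical helper) of the Seq[Bool] ops above: shift a lane vector left
  // by two and OR it with the complement of another, just as if they were UInts.
  def seqBoolOpsExample(lanes: Seq[Bool], grant: Seq[Bool]): Seq[Bool] =
    (lanes << 2) | ~grant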
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
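  // A brief sketch (hypothetical helper) of the UInt extensions above: wrap-around pointer
  // arithmetic for a structure with a non-power-of-2 number of entries.
  def wrapPointerExample(ptr: UInt, inc: UInt, entries: Int = 12): UInt =
    ptr.addWrap(inc, entries) // (ptr + inc) % entries, assuming ptr < entries and inc < entries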
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
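  // A short sketch (hypothetical helper) of leftOR/rightOR: turning a one-hot grant into
  // thermometer masks, e.g. leftOR(b00100) = b11100 and rightOR(b00100) = b00111.
  def thermometerExample(oneHot: UInt): (UInt, UInt) = (leftOR(oneHot), rightOR(oneHot))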
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
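  // A tiny sketch of groupByIntoSeq: unlike Seq.groupBy, the key order is deterministic
  // (insertion order), which keeps generated names stable across elaborations.
  def groupByFirstCharExample(names: Seq[String]): Seq[(Char, Seq[String])] =
    groupByIntoSeq(names)(_.head) // Seq("add","and","or") => Seq('a' -> Seq("add","and"), 'o' -> Seq("or"))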
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
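// A compact sketch (hypothetical module) of the opcode tables above: adResponse maps each
// A-channel opcode to the D-channel opcode expected in reply (e.g. Get => AccessAckData).
class ExpectedResponseExample extends Module {
  val io = IO(new Bundle {
    val a_opcode = Input(UInt(3.W))
    val d_opcode = Output(UInt(3.W))
  })
  io.d_opcode := TLMessages.adResponse(io.a_opcode)
}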
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the global point of serialization and may hold a read-only copy.
* (N)one: the agent holds no permissions on (and no copy of) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, s"Id range start ($start) must not exceed end ($end).")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
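// A small sketch (hypothetical values) of IdRange: half-open [start, end) containment and
// overlap checks, as used when carving up source-ID spaces between masters.
object IdRangeExample {
  val reads  = IdRange(0, 4)   // ids 0, 1, 2, 3
  val writes = IdRange(4, 8)   // ids 4, 5, 6, 7
  require(!(reads overlaps writes))
  require(reads.contains(3) && !reads.contains(4))
  require(IdRange(0, 8).contains(writes))
}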
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
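// A brief sketch (hypothetical values) of TransferSizes: inclusive power-of-2 ranges in bytes,
// combined with intersect (sizes both support) and mincover (smallest range covering both).
object TransferSizesExample {
  val a = TransferSizes(4, 64)
  val b = TransferSizes(16, 256)
  require((a intersect b) == TransferSizes(16, 64))
  require((a mincover b) == TransferSizes(4, 256))
  require(a.contains(32) && !a.contains(128))
}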
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
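// Illustrative usage sketch (added for clarity; hypothetical names, not part of the original file):
// base/mask matching and the misaligned helper described in the comments above.
object AddressSetExamples {
  val strided = AddressSet(0x1000, 0xf0f)          // matches 0x1000-0x100f, 0x1100-0x110f, ...
  val inSet   = strided.contains(BigInt(0x1104))   // true: only "don't care" (mask) bits differ from base
  val outSet  = strided.contains(BigInt(0x1010))   // false: bit 4 is a "care" bit and differs
  // Cover [0x3000, 0x4400) with aligned power-of-two sets:
  val pieces  = AddressSet.misaligned(0x3000, 0x1400)
  // => Seq(AddressSet(0x3000, 0xfff), AddressSet(0x4000, 0x3ff))
}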
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
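// Illustrative sketch (added for clarity; hypothetical names, not part of the original file):
// the depth/flow/pipe combinations trade occupancy against latency.
object BufferParamsExamples {
  val deep = BufferParams.default   // two-entry Queue, adds one cycle of latency
  val cut  = BufferParams.flow      // single entry, combinationally bypassed, zero latency
  require(deep.latency == 1 && cut.latency == 0 && BufferParams.none.latency == 0)
  // BufferParams(3)(someDecoupled) would wrap a hypothetical `someDecoupled` in Queue(someDecoupled, 3)
}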
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
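// Illustrative sketch (added for clarity; hypothetical names, not part of the original file):
// a set TriStateValue overrides the original Boolean, an unset one leaves it alone.
object TriStateValueExamples {
  val forced  = TriStateValue(true).update(orig = false)   // true  (explicitly set)
  val default = TriStateValue.unset.update(orig = false)   // false (falls through to orig)
}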
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
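// Worked example (illustrative, assuming beatBytes = 8 and a 32-byte PutFullData):
// size = 5, so beats1 = (2^5 - 1) >> 3 = 3. Across the four accepted beats the
// counter steps 0 -> 3 -> 2 -> 1, giving
//   first = (T, F, F, F), last = (F, F, F, T), count = (0, 1, 2, 3),
// and 'done' pulses together with 'fire' on the final beat.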
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
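// Illustrative client-side usage (hypothetical, not part of the original file):
// inside a LazyModuleImp with `val (out, edge) = node.out(0)`, a read could be issued as
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits  := get
//   out.d.ready := true.B
// where `addr` and `wantRead` are assumed signals of the requesting module.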
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
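// Illustrative manager-side usage (hypothetical, not part of the original file):
// a single-beat register device answering Gets with edge.AccessAck could look like
//   in.a.ready := in.d.ready
//   in.d.valid := in.a.valid
//   in.d.bits  := edge.AccessAck(in.a.bits, regValue)
// where `regValue` is the read data and `edge` is the TLEdgeIn of the node.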
| module TLMonitor_49( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54]
wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52]
wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79]
wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35]
wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35]
wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34]
wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34]
wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34]
wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_25 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_31 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_14 = _source_ok_T_13 == 6'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 6'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_26 = _source_ok_T_25 == 6'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 6'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire _source_ok_T_37 = io_in_a_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_37; // @[Parameters.scala:1138:31]
wire _source_ok_T_38 = io_in_a_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_47 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [2:0] uncommonBits = _uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_1 = _uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_7 = _uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_18 = _uncommonBits_T_18[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_25 = _uncommonBits_T_25[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_30 = _uncommonBits_T_30[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_31 = _uncommonBits_T_31[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_36 = _uncommonBits_T_36[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_37 = _uncommonBits_T_37[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_42 = _uncommonBits_T_42[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_43 = _uncommonBits_T_43[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_48 = _uncommonBits_T_48[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_48 = io_in_d_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_61 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_67 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_73 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_79 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_62 = _source_ok_T_61 == 6'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_66; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_68 = _source_ok_T_67 == 6'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_74 = _source_ok_T_73 == 6'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_78; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_80 = _source_ok_T_79 == 6'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire _source_ok_T_85 = io_in_d_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_85; // @[Parameters.scala:1138:31]
wire _source_ok_T_86 = io_in_d_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_86; // @[Parameters.scala:1138:31]
wire _source_ok_T_87 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire _source_ok_T_88 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_89 = _source_ok_T_88 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_90 = _source_ok_T_89 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_91 = _source_ok_T_90 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_95 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1115 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1115; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1115; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [7:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1183 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1183; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1183; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1183; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [7:0] source_1; // @[Monitor.scala:541:22]
reg [128:0] inflight; // @[Monitor.scala:614:27]
reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [515:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [128:0] a_set; // @[Monitor.scala:626:34]
wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [515:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [255:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1048 = _T_1115 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1048 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1048 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1048 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1048 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1048 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [128:0] d_clr; // @[Monitor.scala:664:34]
wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1094 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1094 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1063 = _T_1183 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1063 ? _d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1063 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1063 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [128:0] inflight_1; // @[Monitor.scala:726:35]
wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [128:0] d_clr_1; // @[Monitor.scala:774:34]
wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1159 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1159 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1141 = _T_1183 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1141 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1141 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1141 ? _d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113]
wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
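// Illustrative usage sketch (hypothetical values, not from the original source):
// XOR-folding an 8-bit value down to 4 bits XORs the two 4-bit halves together.
//   val folded = Fold("b10110110".U(8.W), compressedLength = 4, fullLength = 8)
//   // folded === "b1101".U  (0b0110 ^ 0b1011)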
/**
 * Object to check if a MicroOp was killed due to a branch mispredict.
 * Uses "Fast" branch masks.
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
 * Object to shift a register over by one bit and concatenate a new bit
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
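// Illustrative examples (hypothetical values):
//   WrapAdd(5.U, 4.U, 6)  // yields 3.U, since 5 + 4 = 9 wraps past the 6-entry bound
//   WrapAdd(6.U, 3.U, 8)  // yields 1.U via the simple power-of-two truncation path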
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
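// Illustrative examples (hypothetical values) for a 6-entry circular structure:
//   WrapInc(5.U, 6)  // yields 0.U (wraps past the last index)
//   WrapDec(0.U, 6)  // yields 5.U (wraps back to the last index)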
/**
 * Object to mask off the lower bits of a PC to align it to a b-byte
 * boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert rather than AND with ~(b-1).U, for the scenario where pc is wider
    // than b (the narrow mask would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
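// Sketch of the intended behaviour (hypothetical values): aligning down to a
// 64-byte boundary clears the low six bits of the PC.
//   AlignPCToBoundary("h123".U(12.W), 64)  // yields "h100".U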
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
 * Object to sign-extend (sext) a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
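// Illustrative example (hypothetical values): sign-extending a 4-bit value to
// 8 bits replicates its MSB into the upper bits.
//   Sext("b1010".U(4.W), 8)  // yields "b11111010".U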
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
 * bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
 * Object to return the lowest set bit position at or after the head,
 * wrapping to the lowest set bit overall if none exists at or after it.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
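// Worked example (illustrative): with in = Seq(true.B, false.B, true.B, false.B)
// (bits 0 and 2 set) and head = 1.U, the result is 2.U. The lowest set index at
// or after head wins; only if nothing at or after head is set does the encoder
// fall back to the lowest set index overall.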
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
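// Worked example (illustrative): in an 8-entry queue with head = 3.U, entry 5 was
// allocated before entry 1 (allocation order wraps past 7 back to 0), so
// IsOlder(5.U, 1.U, 3.U) is true.B while IsOlder(1.U, 5.U, 3.U) is false.B.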
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
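// Illustrative 4-bit examples:
//   MaskLower("b0100".U(4.W)) gives "b0111".U  // bit 2 and everything below it
//   MaskUpper("b0100".U(4.W)) gives "b1100".U  // bit 2 and everything above it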
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
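// Illustrative example: transposing a Vec(2, Vec(3, UInt(8.W))) of rows yields a
// Vec(3, Vec(2, UInt(8.W))) of columns, with Transpose(m)(i)(j) equal to m(j)(i).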
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
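// Worked example (illustrative): SelectFirstN("b1011".U(4.W), 2) returns
//   sels(0) = "b0001".U and sels(1) = "b0010".U,
// i.e. one-hot selects for the two lowest set bits of the input.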
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
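// Usage sketch (illustrative; `producers` is assumed to be a Vec(4, Decoupled)
// source and `consumers` a Vec(2, Decoupled) sink, neither defined here):
//   val compactor = Module(new Compactor(4, 2, UInt(8.W)))
//   compactor.io.in <> producers
//   consumers <> compactor.io.out
// The two lowest-indexed valid inputs are routed to the two outputs each cycle.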
/**
* Create a queue that can be killed with a branch kill signal.
 * Assumption: enq.valid is only high if not killed by a branch (so IsKilled is not checked on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
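// Usage sketch (illustrative; `MyPayload` stands in for any bundle mixing in
// boom.v4.common.HasBoomUOP, and `producer`/`consumer` are hypothetical
// Decoupled interfaces):
//   val q = Module(new BranchKillableQueue(new MyPayload, entries = 4))
//   q.io.enq <> producer
//   q.io.brupdate := io.brupdate   // entries killed by a branch are dropped
//   q.io.flush := false.B          // or a pipeline-flush condition
//   consumer <> q.io.deq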
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
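// Usage sketch (illustrative): printing a single-character valid flag,
//   printf("valid:%c\n", BoolToChar(io.valid, 'V'))
// emits 'V' when io.valid is set and '-' otherwise (io.valid is assumed here).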
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File frontend.scala:
//******************************************************************************
// Copyright (c) 2017 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Frontend
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.rocket._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property._
import boom.v4.common._
import boom.v4.exu.{CommitExceptionSignals, BranchDecode, BrUpdateInfo, BranchDecodeSignals}
import boom.v4.util._
class GlobalHistory(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
// For the dual banked case, each bank ignores the contribution of the
// last bank to the history. Thus we have to track the most recent update to the
// history in that case
val old_history = UInt(globalHistoryLength.W)
val current_saw_branch_not_taken = Bool()
val new_saw_branch_not_taken = Bool()
val new_saw_branch_taken = Bool()
val ras_idx = UInt(log2Ceil(nRasEntries).W)
def histories(bank: Int) = {
if (nBanks == 1) {
old_history
} else {
require(nBanks == 2)
if (bank == 0) {
old_history
} else {
Mux(new_saw_branch_taken , old_history << 1 | 1.U,
Mux(new_saw_branch_not_taken , old_history << 1,
old_history))
}
}
}
def ===(other: GlobalHistory): Bool = {
((old_history === other.old_history) &&
(new_saw_branch_taken === other.new_saw_branch_taken) &&
(new_saw_branch_taken || (new_saw_branch_not_taken === other.new_saw_branch_not_taken))
)
}
def =/=(other: GlobalHistory): Bool = !(this === other)
def update(branches: UInt, cfi_taken: Bool, cfi_is_br: Bool, cfi_idx: UInt,
cfi_valid: Bool, addr: UInt,
cfi_is_call: Bool, cfi_is_ret: Bool): GlobalHistory = {
val cfi_idx_fixed = cfi_idx(log2Ceil(fetchWidth)-1,0)
val cfi_idx_oh = UIntToOH(cfi_idx_fixed)
val new_history = Wire(new GlobalHistory)
val not_taken_branches = branches & Mux(cfi_valid,
MaskLower(cfi_idx_oh) & ~Mux(cfi_is_br && cfi_taken, cfi_idx_oh, 0.U(fetchWidth.W)),
~(0.U(fetchWidth.W)))
if (nBanks == 1) {
      // In the single-bank case every bank sees the full history, including the previous bank's contribution
new_history := DontCare
new_history.current_saw_branch_not_taken := false.B
val saw_not_taken_branch = not_taken_branches =/= 0.U || current_saw_branch_not_taken
new_history.old_history := Mux(cfi_is_br && cfi_taken && cfi_valid , histories(0) << 1 | 1.U,
Mux(saw_not_taken_branch , histories(0) << 1,
histories(0)))
} else {
      // In the two-bank case every bank ignores the history added by the previous bank
val base = histories(1)
val cfi_in_bank_0 = cfi_valid && cfi_taken && cfi_idx_fixed < bankWidth.U
val ignore_second_bank = cfi_in_bank_0 || mayNotBeDualBanked(addr)
val first_bank_saw_not_taken = not_taken_branches(bankWidth-1,0) =/= 0.U || current_saw_branch_not_taken
new_history.current_saw_branch_not_taken := false.B
when (ignore_second_bank) {
new_history.old_history := histories(1)
new_history.new_saw_branch_not_taken := first_bank_saw_not_taken
new_history.new_saw_branch_taken := cfi_is_br && cfi_in_bank_0
} .otherwise {
new_history.old_history := Mux(cfi_is_br && cfi_in_bank_0 , histories(1) << 1 | 1.U,
Mux(first_bank_saw_not_taken , histories(1) << 1,
histories(1)))
new_history.new_saw_branch_not_taken := not_taken_branches(fetchWidth-1,bankWidth) =/= 0.U
new_history.new_saw_branch_taken := cfi_valid && cfi_taken && cfi_is_br && !cfi_in_bank_0
}
}
new_history.ras_idx := Mux(cfi_valid && cfi_is_call, WrapInc(ras_idx, nRasEntries),
Mux(cfi_valid && cfi_is_ret , WrapDec(ras_idx, nRasEntries), ras_idx))
new_history
}
}
/**
* Parameters to manage a L1 Banked ICache
*/
trait HasBoomFrontendParameters extends HasL1ICacheParameters
{
// How many banks does the ICache use?
val nBanks = if (cacheParams.fetchBytes <= 8) 1 else 2
// How many bytes wide is a bank?
val bankBytes = fetchBytes/nBanks
val bankWidth = fetchWidth/nBanks
require(nBanks == 1 || nBanks == 2)
// How many "chunks"/interleavings make up a cache line?
val numChunks = cacheParams.blockBytes / bankBytes
// Which bank is the address pointing to?
def bank(addr: UInt) = if (nBanks == 2) addr(log2Ceil(bankBytes)) else 0.U
def isLastBankInBlock(addr: UInt) = {
(nBanks == 2).B && addr(blockOffBits-1, log2Ceil(bankBytes)) === (numChunks-1).U
}
def mayNotBeDualBanked(addr: UInt) = {
require(nBanks == 2)
isLastBankInBlock(addr)
}
def blockAlign(addr: UInt) = ~(~addr | (cacheParams.blockBytes-1).U)
def bankAlign(addr: UInt) = ~(~addr | (bankBytes-1).U)
def fetchIdx(addr: UInt) = addr >> log2Ceil(fetchBytes)
def nextBank(addr: UInt) = bankAlign(addr) + bankBytes.U
def nextFetch(addr: UInt) = {
if (nBanks == 1) {
bankAlign(addr) + bankBytes.U
} else {
require(nBanks == 2)
bankAlign(addr) + Mux(mayNotBeDualBanked(addr), bankBytes.U, fetchBytes.U)
}
}
def fetchMask(addr: UInt) = {
val idx = addr.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes))
if (nBanks == 1) {
((1 << fetchWidth)-1).U << idx
} else {
val shamt = idx.extract(log2Ceil(fetchWidth)-2, 0)
val end_mask = Mux(mayNotBeDualBanked(addr), Fill(fetchWidth/2, 1.U), Fill(fetchWidth, 1.U))
((1 << fetchWidth)-1).U << shamt & end_mask
}
}
def bankMask(addr: UInt) = {
val idx = addr.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes))
if (nBanks == 1) {
1.U(1.W)
} else {
Mux(mayNotBeDualBanked(addr), 1.U(2.W), 3.U(2.W))
}
}
}
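// Worked example (illustrative, assuming fetchBytes = 16, blockBytes = 64 and a
// dual-banked I-cache, so bankBytes = 8 and numChunks = 8):
//   bank(addr) is addr(3), selecting which 8-byte bank a fetch starts in;
//   blockAlign(0x4002a.U) = 0x40000.U and bankAlign(0x4002a.U) = 0x40028.U;
//   nextFetch advances the bank-aligned address by fetchBytes (16) unless the
//   fetch starts in the last bank of a block, in which case it advances by only
//   bankBytes (8) so a fetch never straddles a cache-line boundary.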
/**
* Bundle passed into the FetchBuffer and used to combine multiple
* relevant signals together.
*/
class FetchBundle(implicit p: Parameters) extends BoomBundle
with HasBoomFrontendParameters
{
val pc = UInt(vaddrBitsExtended.W)
val next_pc = UInt(vaddrBitsExtended.W)
val next_fetch = UInt(vaddrBitsExtended.W)
val edge_inst = Vec(nBanks, Bool()) // True if 1st instruction in this bundle is pc - 2
val insts = Vec(fetchWidth, Bits(32.W))
val exp_insts = Vec(fetchWidth, Bits(32.W))
val pcs = Vec(fetchWidth, UInt(vaddrBitsExtended.W))
// Information for sfb folding
  // NOTE: This IS NOT equivalent to uop.pc_lob; that gets calculated in the FB
val sfbs = Vec(fetchWidth, Bool())
val sfb_masks = Vec(fetchWidth, UInt((2*fetchWidth).W))
val sfb_dests = Vec(fetchWidth, UInt((1+log2Ceil(fetchBytes)).W))
val shadowable_mask = Vec(fetchWidth, Bool())
val shadowed_mask = Vec(fetchWidth, Bool())
val cfi_idx = Valid(UInt(log2Ceil(fetchWidth).W))
val cfi_type = UInt(CFI_SZ.W)
val cfi_is_call = Bool()
val cfi_is_ret = Bool()
val cfi_npc_plus4 = Bool()
val ras_top = UInt(vaddrBitsExtended.W)
val ftq_idx = UInt(log2Ceil(ftqSz).W)
val mask = UInt(fetchWidth.W) // mark which words are valid instructions
val br_mask = UInt(fetchWidth.W)
val ghist = new GlobalHistory
val lhist = Vec(nBanks, UInt(localHistoryLength.W))
val xcpt_pf_if = Bool() // I-TLB miss (instruction fetch fault).
val xcpt_ae_if = Bool() // Access exception.
val bp_debug_if_oh= Vec(fetchWidth, Bool())
val bp_xcpt_if_oh = Vec(fetchWidth, Bool())
val end_half = Valid(UInt(16.W))
val bpd_meta = Vec(nBanks, UInt())
// Source of the prediction from this bundle
val fsrc = UInt(BSRC_SZ.W)
// Source of the prediction to this bundle
val tsrc = UInt(BSRC_SZ.W)
}
/**
* IO for the BOOM Frontend to/from the CPU
*/
class BoomFrontendIO(implicit p: Parameters) extends BoomBundle
{
// Give the backend a packet of instructions.
val fetchpacket = Flipped(new DecoupledIO(new FetchBufferResp))
// 1 for xcpt/jalr/auipc/flush
val arb_ftq_reqs = Output(Vec(3, UInt(log2Ceil(ftqSz).W)))
val rrd_ftq_resps = Input(Vec(3, new FTQInfo))
val com_pc = Input(UInt(vaddrBitsExtended.W))
val debug_ftq_idx = Output(Vec(coreWidth, UInt(log2Ceil(ftqSz).W)))
val debug_fetch_pc = Input(Vec(coreWidth, UInt(vaddrBitsExtended.W)))
// Breakpoint info
val status = Output(new MStatus)
val bp = Output(Vec(nBreakpoints, new BP))
val mcontext = Output(UInt(coreParams.mcontextWidth.W))
val scontext = Output(UInt(coreParams.scontextWidth.W))
val sfence = Valid(new SFenceReq)
val brupdate = Output(new BrUpdateInfo)
// Redirects change the PC
val redirect_flush = Output(Bool()) // Flush and hang the frontend?
val redirect_val = Output(Bool()) // Redirect the frontend?
val redirect_pc = Output(UInt()) // Where do we redirect to?
val redirect_ftq_idx = Output(UInt()) // Which ftq entry should we reset to?
val redirect_ghist = Output(new GlobalHistory) // What are we setting as the global history?
val commit = Valid(UInt(ftqSz.W))
val flush_icache = Output(Bool())
val enable_bpd = Output(Bool())
val perf = Input(new FrontendPerfEvents)
}
/**
* Top level Frontend class
*
* @param icacheParams parameters for the icache
* @param hartid id for the hardware thread of the core
*/
class BoomFrontend(val icacheParams: ICacheParams, staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends LazyModule
{
lazy val module = new BoomFrontendModule(this)
val icache = LazyModule(new boom.v4.ifu.ICache(icacheParams, staticIdForMetadataUseOnly))
val masterNode = icache.masterNode
val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() =>
UInt(masterNode.edges.out.head.bundle.addressBits.W)))
}
/**
* Bundle wrapping the IO for the Frontend as a whole
*
* @param outer top level Frontend class
*/
class BoomFrontendBundle(val outer: BoomFrontend) extends CoreBundle()(outer.p)
{
val cpu = Flipped(new BoomFrontendIO())
val ptw = new TLBPTWIO()
val errors = new ICacheErrors
}
/**
* Main Frontend module that connects the icache, TLB, fetch controller,
* and branch prediction pipeline together.
*
* @param outer top level Frontend class
*/
class BoomFrontendModule(outer: BoomFrontend) extends LazyModuleImp(outer)
with HasBoomCoreParameters
with HasBoomFrontendParameters
{
val io = IO(new BoomFrontendBundle(outer))
io.errors := DontCare
val io_reset_vector = outer.resetVectorSinkNode.bundle
implicit val edge = outer.masterNode.edges.out(0)
require(fetchWidth*coreInstBytes == outer.icacheParams.fetchBytes)
val bpd = Module(new BranchPredictor)
bpd.io.f3_fire := false.B
val ras = Module(new BoomRAS)
val icache = outer.icache.module
icache.io.invalidate := io.cpu.flush_icache
val tlb = Module(new TLB(true, log2Ceil(fetchBytes), TLBConfig(nTLBSets, nTLBWays)))
io.ptw <> tlb.io.ptw
io.cpu.perf.tlbMiss := io.ptw.req.fire
io.cpu.perf.acquire := icache.io.perf.acquire
// --------------------------------------------------------
// **** NextPC Select (F0) ****
// Send request to ICache
// --------------------------------------------------------
val s0_vpc = WireInit(0.U(vaddrBitsExtended.W))
val s0_ghist = WireInit((0.U).asTypeOf(new GlobalHistory))
val s0_tsrc = WireInit(0.U(BSRC_SZ.W))
dontTouch(s0_tsrc)
val s0_valid = WireInit(false.B)
val s0_is_replay = WireInit(false.B)
val s0_is_sfence = WireInit(false.B)
val s0_replay_resp = Wire(new TLBResp(log2Ceil(fetchBytes)))
val s0_replay_ppc = Wire(UInt())
icache.io.req.valid := s0_valid
icache.io.req.bits.addr := s0_vpc
bpd.io.f0_req.valid := s0_valid && io.cpu.enable_bpd
bpd.io.f0_req.bits.pc := Mux(io.cpu.enable_bpd, s0_vpc, 0.U)
bpd.io.f0_req.bits.ghist := Mux(io.cpu.enable_bpd, s0_ghist, 0.U.asTypeOf(new GlobalHistory))
// --------------------------------------------------------
// **** ICache Access (F1) ****
// Translate VPC
// --------------------------------------------------------
val s1_vpc = RegNext(s0_vpc)
val s1_valid = RegNext(s0_valid, false.B)
val s1_ghist = RegNext(s0_ghist)
val s1_is_replay = RegNext(s0_is_replay)
val s1_is_sfence = RegNext(s0_is_sfence)
val f1_clear = WireInit(false.B)
val s1_tsrc = RegNext(s0_tsrc)
tlb.io.req.valid := (s1_valid && !s1_is_replay && !f1_clear) || s1_is_sfence
tlb.io.req.bits.cmd := DontCare
tlb.io.req.bits.vaddr := s1_vpc
tlb.io.req.bits.passthrough := false.B
tlb.io.req.bits.size := log2Ceil(coreInstBytes * fetchWidth).U
tlb.io.req.bits.prv := io.ptw.status.prv
tlb.io.req.bits.v := io.ptw.status.v
tlb.io.sfence := RegNext(io.cpu.sfence)
tlb.io.kill := false.B
val s1_tlb_miss = !s1_is_replay && tlb.io.resp.miss
val s1_tlb_resp = Mux(s1_is_replay, RegNext(s0_replay_resp), tlb.io.resp)
val s1_ppc = Mux(s1_is_replay, RegNext(s0_replay_ppc), tlb.io.resp.paddr)
val s1_bpd_resp = bpd.io.resp.f1
icache.io.s1_paddr := s1_ppc
icache.io.s1_kill := tlb.io.resp.miss || f1_clear
val f1_mask = fetchMask(s1_vpc)
val f1_redirects = (0 until fetchWidth) map { i =>
f1_mask(i) && s1_bpd_resp.preds(i).predicted_pc.valid && s1_bpd_resp.preds(i).taken
}
val f1_do_redirect = f1_redirects.reduce(_||_) && useBPD.B
val f1_targs = s1_bpd_resp.preds.map(_.predicted_pc.bits)
val f1_targ = if (nBanks == 1) {
Mux1H(f1_redirects, f1_targs)
} else {
require(nBanks == 2)
Mux(f1_redirects.take(bankWidth).reduce(_||_), Mux1H(f1_redirects.take(bankWidth), f1_targs.take(bankWidth)),
Mux1H(f1_redirects.drop(bankWidth), f1_targs.drop(bankWidth)))
}
val f1_next_fetch = nextFetch(s1_vpc)
val f1_predicted_target = Mux(f1_do_redirect,
f1_targ,
f1_next_fetch)
val f1_predicted_ghist = s1_ghist.update(
s1_bpd_resp.preds.map(p => p.is_br).asUInt & f1_mask,
PriorityMux(f1_redirects, s1_bpd_resp.preds).taken && f1_do_redirect,
PriorityMux(f1_redirects, s1_bpd_resp.preds).is_br,
PriorityEncoder(f1_redirects),
f1_do_redirect,
s1_vpc,
false.B,
false.B)
when (s1_valid) {
// Stop fetching on fault
s0_valid := true.B
s0_tsrc := BSRC_1
s0_vpc := f1_predicted_target
s0_ghist := f1_predicted_ghist
s0_is_replay := false.B
}
// --------------------------------------------------------
// **** ICache Response (F2) ****
// --------------------------------------------------------
val s2_valid = RegNext(s1_valid && !f1_clear, false.B)
val s2_vpc = RegNext(s1_vpc)
val s2_ghist = Reg(new GlobalHistory)
s2_ghist := s1_ghist
val s2_ppc = RegNext(s1_ppc)
val s2_tsrc = RegNext(s1_tsrc) // tsrc provides the predictor component which provided the prediction TO this instruction
val s2_fsrc = WireInit(BSRC_1) // fsrc provides the predictor component which provided the prediction FROM this instruction
val f2_clear = WireInit(false.B)
val s2_tlb_resp = RegNext(s1_tlb_resp)
val s2_tlb_miss = RegNext(s1_tlb_miss)
val s2_is_replay = RegNext(s1_is_replay) && s2_valid
val s2_xcpt = s2_valid && (s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst) && !s2_is_replay
val f3_ready = Wire(Bool())
icache.io.s2_kill := s2_xcpt
icache.io.s2_prefetch := false.B
val f2_bpd_resp = bpd.io.resp.f2
val f2_fetch_mask = fetchMask(s2_vpc)
val f2_redirects = (0 until fetchWidth) map { i =>
f2_fetch_mask(i) && f2_bpd_resp.preds(i).predicted_pc.valid && f2_bpd_resp.preds(i).taken
}
val f2_targs = f2_bpd_resp.preds.map(_.predicted_pc.bits)
val f2_do_redirect = f2_redirects.reduce(_||_) && useBPD.B
val f2_next_fetch = RegNext(f1_next_fetch)
val f2_predicted_target = Mux(f2_do_redirect,
if (useSlowBTBRedirect) RegNext(f1_targ) else PriorityMux(f2_redirects, f2_targs),
f2_next_fetch)
val f2_predicted_ghist = s2_ghist.update(
f2_bpd_resp.preds.map(p => p.is_br && p.predicted_pc.valid).asUInt & f2_fetch_mask,
PriorityMux(f2_redirects, f2_bpd_resp.preds).taken && f2_do_redirect,
PriorityMux(f2_redirects, f2_bpd_resp.preds).is_br,
PriorityEncoder(f2_redirects),
f2_do_redirect,
s2_vpc,
false.B,
false.B)
val f2_aligned_pc = bankAlign(s2_vpc)
val f2_bank_mask = bankMask(s2_vpc)
val f2_inst_mask = Wire(Vec(fetchWidth, Bool()))
// Tracks trailing 16b of previous fetch packet
val f2_prev_half = Reg(UInt(16.W))
  // Tracks if the last fetch packet contained a half-instruction
val f2_prev_is_half = RegInit(false.B)
val f2_fetch_bundle = Wire(new FetchBundle)
f2_fetch_bundle := DontCare
f2_fetch_bundle.pc := s2_vpc
f2_fetch_bundle.next_pc := Mux(f2_do_redirect, PriorityMux(f2_redirects, f2_targs), f2_next_fetch)
f2_fetch_bundle.next_fetch := f2_next_fetch
f2_fetch_bundle.xcpt_pf_if := s2_tlb_resp.pf.inst
f2_fetch_bundle.xcpt_ae_if := s2_tlb_resp.ae.inst
f2_fetch_bundle.fsrc := s2_fsrc
f2_fetch_bundle.tsrc := s2_tsrc
f2_fetch_bundle.ghist := s2_ghist
f2_fetch_bundle.mask := f2_inst_mask.asUInt
f2_fetch_bundle.cfi_idx.valid := f2_redirects.reduce(_||_)
f2_fetch_bundle.cfi_idx.bits := PriorityEncoder(f2_redirects)
require(fetchWidth >= 4) // Logic gets kind of annoying with fetchWidth = 2
def isRVC(inst: UInt) = (inst(1,0) =/= 3.U)
var bank_prev_is_half = f2_prev_is_half
var bank_prev_half = f2_prev_half
var last_inst = 0.U(16.W)
for (b <- 0 until nBanks) {
f2_fetch_bundle.bpd_meta(b) := 0.U(1.W)
val bank_data = icache.io.resp.bits.data((b+1)*bankWidth*16-1, b*bankWidth*16)
for (w <- 0 until bankWidth) {
val i = (b * bankWidth) + w
val valid = Wire(Bool())
f2_inst_mask(i) := s2_valid && f2_fetch_mask(i) && valid
f2_fetch_bundle.pcs(i) := f2_aligned_pc + (i << 1).U - ((f2_fetch_bundle.edge_inst(b) && (w == 0).B) << 1)
if (w == 0) {
valid := true.B
when (bank_prev_is_half) {
f2_fetch_bundle.insts(i) := Cat(bank_data(15,0), f2_prev_half)
f2_fetch_bundle.exp_insts(i) := ExpandRVC(Cat(bank_data(15,0), f2_prev_half))
f2_fetch_bundle.edge_inst(b) := true.B
if (b > 0) {
when (f2_bank_mask(b-1)) {
f2_fetch_bundle.insts(i) := Cat(bank_data(15,0), last_inst)
f2_fetch_bundle.exp_insts(i) := ExpandRVC(Cat(bank_data(15,0), last_inst))
}
}
} .otherwise {
f2_fetch_bundle.insts(i) := bank_data(31,0)
f2_fetch_bundle.exp_insts(i) := ExpandRVC(bank_data(31,0))
f2_fetch_bundle.edge_inst(b) := false.B
}
} else if (w == 1) {
        // Needs a special case since the 0th instruction may carry over from the previous fetch packet
val inst = bank_data(47,16)
f2_fetch_bundle.insts(i) := inst
f2_fetch_bundle.exp_insts(i) := ExpandRVC(inst)
valid := bank_prev_is_half || !(f2_inst_mask(i-1) && !isRVC(f2_fetch_bundle.insts(i-1)))
} else if (w == bankWidth - 1) {
val inst = Cat(0.U(16.W), bank_data(bankWidth*16-1,(bankWidth-1)*16))
f2_fetch_bundle.insts(i) := inst
f2_fetch_bundle.exp_insts(i) := ExpandRVC(inst)
valid := !((f2_inst_mask(i-1) && !isRVC(f2_fetch_bundle.insts(i-1))) || !isRVC(inst))
} else {
val inst = bank_data(w*16+32-1,w*16)
f2_fetch_bundle.insts(i) := inst
f2_fetch_bundle.exp_insts(i) := ExpandRVC(inst)
valid := !(f2_inst_mask(i-1) && !isRVC(f2_fetch_bundle.insts(i-1)))
}
}
last_inst = f2_fetch_bundle.insts((b+1)*bankWidth-1)(15,0)
bank_prev_is_half = Mux(f2_bank_mask(b),
(!(f2_inst_mask((b+1)*bankWidth-2) && !isRVC(f2_fetch_bundle.insts((b+1)*bankWidth-2))) && !isRVC(last_inst)),
bank_prev_is_half)
bank_prev_half = Mux(f2_bank_mask(b),
last_inst(15,0),
bank_prev_half)
}
f2_fetch_bundle.end_half.valid := bank_prev_is_half
f2_fetch_bundle.end_half.bits := bank_prev_half
val f2_correct_f1_ghist = s1_ghist =/= f2_predicted_ghist && enableGHistStallRepair.B
when ((s2_valid && !icache.io.resp.valid) ||
(s2_valid && icache.io.resp.valid && !f3_ready)) {
s0_valid := (!s2_tlb_resp.ae.inst && !s2_tlb_resp.pf.inst) || s2_is_replay || s2_tlb_miss
s0_vpc := s2_vpc
s0_is_replay := s2_valid && icache.io.resp.valid
s0_ghist := s2_ghist
s0_tsrc := s2_tsrc
f1_clear := true.B
} .elsewhen (s2_valid && f3_ready) {
when (s1_valid && s1_vpc === f2_predicted_target && !f2_correct_f1_ghist) {
// We trust our prediction of what the global history for the next branch should be
s2_ghist := f2_predicted_ghist
}
f2_prev_is_half := bank_prev_is_half && !f2_do_redirect
f2_prev_half := bank_prev_half
when ((s1_valid && (s1_vpc =/= f2_predicted_target || f2_correct_f1_ghist)) || !s1_valid) {
f1_clear := true.B
s0_valid := !((s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst) && !s2_is_replay)
s0_vpc := f2_predicted_target
s0_is_replay := false.B
s0_ghist := f2_predicted_ghist
s2_fsrc := BSRC_2
s0_tsrc := BSRC_2
}
}
s0_replay_resp := s2_tlb_resp
s0_replay_ppc := s2_ppc
// --------------------------------------------------------
// **** F3 ****
// --------------------------------------------------------
val f3_clear = WireInit(false.B)
val f3 = withReset(reset.asBool || f3_clear) {
Module(new Queue(new FetchBundle, 1, pipe=true, flow=false)) }
  // Queue up the bpd resp as well, in case f4 backpressures f3
// This is "flow" because the response (enq) arrives in f3, not f2
val f3_bpd_queue = withReset(reset.asBool || f3_clear) {
Module(new Queue(new BranchPredictionBundle, 1, pipe=true, flow=true)) }
val f4_ready = Wire(Bool())
f3_ready := f3.io.enq.ready
f3.io.enq.valid := (s2_valid && !f2_clear &&
(icache.io.resp.valid || ((s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst) && !s2_tlb_miss))
)
f3.io.enq.bits := f2_fetch_bundle
// The BPD resp comes in f3
f3_bpd_queue.io.enq.valid := f3.io.deq.valid && RegNext(f3.io.enq.ready)
f3_bpd_queue.io.enq.bits := bpd.io.resp.f3
when (f3_bpd_queue.io.enq.fire) {
bpd.io.f3_fire := true.B
}
f3.io.deq.ready := f4_ready
f3_bpd_queue.io.deq.ready := f4_ready
val f3_bpd_resp = f3_bpd_queue.io.deq.bits
val f3_bank_mask = bankMask(f3.io.deq.bits.pc)
val f3_aligned_pc = bankAlign(f3.io.deq.bits.pc)
val f3_is_last_bank_in_block = isLastBankInBlock(f3_aligned_pc)
val f3_is_rvc = Wire(Vec(fetchWidth, Bool()))
val f3_redirects = Wire(Vec(fetchWidth, Bool()))
val f3_targs = Wire(Vec(fetchWidth, UInt(vaddrBitsExtended.W)))
val f3_cfi_types = Wire(Vec(fetchWidth, UInt(CFI_SZ.W)))
val f3_shadowed_mask = Wire(Vec(fetchWidth, Bool()))
//val f3_fetch_bundle = Wire(new FetchBundle)
val f3_fetch_bundle = WireInit(f3.io.deq.bits)
val f3_mask = Wire(Vec(fetchWidth, Bool()))
val f3_br_mask = Wire(Vec(fetchWidth, Bool()))
val f3_call_mask = Wire(Vec(fetchWidth, Bool()))
val f3_ret_mask = Wire(Vec(fetchWidth, Bool()))
val f3_npc_plus4_mask = Wire(Vec(fetchWidth, Bool()))
val f3_btb_mispredicts = Wire(Vec(fetchWidth, Bool()))
f3_fetch_bundle.mask := f3_mask.asUInt
f3_fetch_bundle.br_mask := f3_br_mask.asUInt
f3_fetch_bundle.ftq_idx := 0.U // This gets assigned later
f3_fetch_bundle.shadowed_mask := f3_shadowed_mask
var redirect_found = false.B
for (b <- 0 until nBanks) {
for (w <- 0 until bankWidth) {
val i = (b * bankWidth) + w
val pc = f3_fetch_bundle.pcs(i)
val bpu = Module(new BreakpointUnit(nBreakpoints))
bpu.io.status := io.cpu.status
bpu.io.bp := io.cpu.bp
bpu.io.ea := DontCare
bpu.io.pc := pc
bpu.io.mcontext := io.cpu.mcontext
bpu.io.scontext := io.cpu.scontext
val bpd_decoder = Module(new BranchDecode)
bpd_decoder.io.inst := f3_fetch_bundle.exp_insts(i)
bpd_decoder.io.pc := pc
val brsigs = bpd_decoder.io.out
f3_is_rvc(i) := isRVC(f3_fetch_bundle.insts(i))
f3_mask (i) := f3.io.deq.valid && f3.io.deq.bits.mask(i) && !redirect_found
f3_targs (i) := Mux(brsigs.cfi_type === CFI_JALR,
f3_bpd_resp.preds(i).predicted_pc.bits,
brsigs.target)
// Flush BTB entries for JALs if we mispredict the target
f3_btb_mispredicts(i) := (brsigs.cfi_type === CFI_JAL && f3.io.deq.bits.mask(i) &&
f3_bpd_resp.preds(i).predicted_pc.valid &&
(f3_bpd_resp.preds(i).predicted_pc.bits =/= brsigs.target)
)
f3_npc_plus4_mask(i) := (if (w == 0) {
!f3_is_rvc(i) && !f3_fetch_bundle.edge_inst(b)
} else {
!f3_is_rvc(i)
})
val offset_from_aligned_pc = (
(i << 1).U((log2Ceil(icBlockBytes)+1).W) +
brsigs.sfb_offset.bits -
Mux(f3_fetch_bundle.edge_inst(b) && (w == 0).B, 2.U, 0.U)
)
val lower_mask = Wire(UInt((2*fetchWidth).W))
val upper_mask = Wire(UInt((2*fetchWidth).W))
lower_mask := UIntToOH(i.U)
upper_mask := UIntToOH(offset_from_aligned_pc(log2Ceil(fetchBytes)+1,1)) << Mux(f3_is_last_bank_in_block, bankWidth.U, 0.U)
f3_fetch_bundle.sfbs(i) := (
f3_mask(i) &&
brsigs.sfb_offset.valid &&
(offset_from_aligned_pc <= Mux(f3_is_last_bank_in_block, (fetchBytes+bankBytes).U,(2*fetchBytes).U))
)
f3_fetch_bundle.sfb_masks(i) := ~MaskLower(lower_mask) & ~MaskUpper(upper_mask)
f3_fetch_bundle.shadowable_mask(i) := (!(f3_fetch_bundle.xcpt_pf_if || f3_fetch_bundle.xcpt_ae_if || bpu.io.debug_if || bpu.io.xcpt_if) &&
f3_bank_mask(b) &&
(brsigs.shadowable || !f3_mask(i)))
f3_fetch_bundle.sfb_dests(i) := offset_from_aligned_pc
// Redirect if
// 1) its a JAL/JALR (unconditional)
// 2) the BPD believes this is a branch and says we should take it
f3_redirects(i) := f3_mask(i) && (
brsigs.cfi_type === CFI_JAL || brsigs.cfi_type === CFI_JALR ||
(brsigs.cfi_type === CFI_BR && f3_bpd_resp.preds(i).taken && useBPD.B)
)
f3_br_mask(i) := f3_mask(i) && brsigs.cfi_type === CFI_BR
f3_cfi_types(i) := brsigs.cfi_type
f3_call_mask(i) := brsigs.is_call
f3_ret_mask(i) := brsigs.is_ret
f3_fetch_bundle.bp_debug_if_oh(i) := bpu.io.debug_if
f3_fetch_bundle.bp_xcpt_if_oh (i) := bpu.io.xcpt_if
redirect_found = redirect_found || f3_redirects(i)
}
}
f3_fetch_bundle.cfi_type := f3_cfi_types(f3_fetch_bundle.cfi_idx.bits)
f3_fetch_bundle.cfi_is_call := f3_call_mask(f3_fetch_bundle.cfi_idx.bits)
f3_fetch_bundle.cfi_is_ret := f3_ret_mask (f3_fetch_bundle.cfi_idx.bits)
f3_fetch_bundle.cfi_npc_plus4 := f3_npc_plus4_mask(f3_fetch_bundle.cfi_idx.bits)
f3_fetch_bundle.lhist := f3_bpd_resp.lhist
f3_fetch_bundle.bpd_meta := f3_bpd_resp.meta
when (f3.io.deq.fire) {
assert(f3_bpd_resp.pc === f3_fetch_bundle.pc)
}
f3_fetch_bundle.cfi_idx.valid := f3_redirects.reduce(_||_)
f3_fetch_bundle.cfi_idx.bits := PriorityEncoder(f3_redirects)
  // Use the branch predictor response in fetch-3, since the decoded branch target
// isn't available fast enough
val f3_predicted_targs = f3_bpd_resp.preds.map(_.predicted_pc.bits)
val (f3_predicted_redirects, f3_redirect_target) = {
val redirects = VecInit((0 until fetchWidth) map { i =>
f3.io.deq.bits.mask(i) && f3_bpd_resp.preds(i).predicted_pc.valid && f3_bpd_resp.preds(i).taken
})
val target = if (useSlowBTBRedirect) f3.io.deq.bits.next_pc else PriorityMux(redirects, f3_predicted_targs)
(redirects, target)
}
val f3_predicted_do_redirect = f3_predicted_redirects.reduce(_||_) && useBPD.B
val f3_predicted_target = Mux(f3_predicted_do_redirect,
f3_redirect_target,
f3.io.deq.bits.next_fetch)
val f3_predicted_ghist = f3_fetch_bundle.ghist.update(
f3_bpd_resp.preds.map(p => p.is_br && p.predicted_pc.valid).asUInt & f3.io.deq.bits.mask,
PriorityMux(f3_predicted_redirects, f3_bpd_resp.preds).taken && f3_predicted_do_redirect,
PriorityMux(f3_predicted_redirects, f3_bpd_resp.preds).is_br,
PriorityEncoder(f3_predicted_redirects),
f3_predicted_do_redirect,
f3.io.deq.bits.pc,
false.B,
false.B
)
val f3_decoded_target = Mux(f3_redirects.reduce(_||_),
PriorityMux(f3_redirects, f3_targs),
f3.io.deq.bits.next_fetch
)
f3_fetch_bundle.next_pc := f3_decoded_target
val f3_correct_f2_ghist = s2_ghist =/= f3_predicted_ghist && enableGHistStallRepair.B
val f3_correct_f1_ghist = s1_ghist =/= f3_predicted_ghist && enableGHistStallRepair.B
when (f3.io.deq.valid && f4_ready) {
when (s2_valid && s2_vpc === f3_predicted_target && !f3_correct_f2_ghist) {
f3.io.enq.bits.ghist := f3_predicted_ghist
} .elsewhen (( s2_valid && (s2_vpc =/= f3_predicted_target || f3_correct_f2_ghist)) ||
(!s2_valid && s1_valid && (s1_vpc =/= f3_predicted_target || f3_correct_f1_ghist)) ||
(!s2_valid && !s1_valid)) {
f2_clear := true.B
f2_prev_is_half := f3_fetch_bundle.end_half.valid && !f3_predicted_do_redirect
f2_prev_half := f3_fetch_bundle.end_half.bits
f1_clear := true.B
s0_valid := !(f3_fetch_bundle.xcpt_pf_if || f3_fetch_bundle.xcpt_ae_if)
s0_vpc := f3_predicted_target
s0_is_replay := false.B
s0_ghist := f3_predicted_ghist
s0_tsrc := BSRC_3
f3_fetch_bundle.fsrc := BSRC_3
}
}
// When f3 finds a btb mispredict, queue up a bpd correction update
val f4_btb_corrections = Module(new Queue(new BranchPredictionUpdate, 2))
f4_btb_corrections.io.enq.valid := f3.io.deq.fire && f3_btb_mispredicts.reduce(_||_) && enableBTBFastRepair.B
f4_btb_corrections.io.enq.bits := DontCare
f4_btb_corrections.io.enq.bits.is_mispredict_update := false.B
f4_btb_corrections.io.enq.bits.is_repair_update := false.B
f4_btb_corrections.io.enq.bits.btb_mispredicts := f3_btb_mispredicts.asUInt
f4_btb_corrections.io.enq.bits.pc := f3_fetch_bundle.pc
f4_btb_corrections.io.enq.bits.ghist := f3_fetch_bundle.ghist
f4_btb_corrections.io.enq.bits.lhist := f3_fetch_bundle.lhist
f4_btb_corrections.io.enq.bits.meta := f3_fetch_bundle.bpd_meta
// -------------------------------------------------------
// **** F4 ****
// -------------------------------------------------------
val f4_clear = WireInit(false.B)
val f4 = withReset(reset.asBool || f4_clear) {
Module(new Queue(new FetchBundle, 1, pipe=true, flow=false))}
// TODO: Allow for 4-cycle branch predictors, instead of just reusing the cycle-3
// response
val f4_bpd_queue = withReset(reset.asBool || f3_clear) {
Module(new Queue(new BranchPredictionBundle, 1, pipe=true, flow=false)) }
val fb = Module(new FetchBuffer)
val ftq = Module(new FetchTargetQueue)
// RAS takes a cycle to read
val ras_read_idx = RegInit(0.U(log2Ceil(nRasEntries).W))
ras.io.read_idx := ras_read_idx
when (f3.io.deq.fire) {
ras_read_idx := f4.io.enq.bits.ghist.ras_idx
ras.io.read_idx := f4.io.enq.bits.ghist.ras_idx
}
// Deal with sfbs
val f4_shadowable_masks = VecInit((0 until fetchWidth) map { i =>
f4.io.deq.bits.shadowable_mask.asUInt |
~f4.io.deq.bits.sfb_masks(i)(fetchWidth-1,0)
})
val f3_shadowable_masks = VecInit((0 until fetchWidth) map { i =>
Mux(f4.io.enq.valid, f4.io.enq.bits.shadowable_mask.asUInt, 0.U) |
~f4.io.deq.bits.sfb_masks(i)(2*fetchWidth-1,fetchWidth)
})
val f4_sfbs = VecInit((0 until fetchWidth) map { i =>
enableSFBOpt.B &&
((~f4_shadowable_masks(i) === 0.U) &&
(~f3_shadowable_masks(i) === 0.U) &&
f4.io.deq.bits.sfbs(i) &&
!(f4.io.deq.bits.cfi_idx.valid && f4.io.deq.bits.cfi_idx.bits === i.U) &&
Mux(f4.io.deq.bits.sfb_dests(i) === 0.U,
!f3.io.deq.bits.end_half.valid,
Mux(f4.io.deq.bits.sfb_dests(i) === fetchBytes.U,
!f4.io.deq.bits.end_half.valid,
true.B)
)
)
})
val f4_sfb_valid = f4_sfbs.reduce(_||_) && f4.io.deq.valid
val f4_sfb_mask = PriorityMux(f4_sfbs, f4.io.deq.bits.sfb_masks)
// If we have a SFB, wait for next fetch to be available in f3
val f4_delay = (
f4.io.deq.bits.sfbs.reduce(_||_) &&
!f4.io.deq.bits.cfi_idx.valid &&
!f4.io.enq.valid &&
!f4.io.deq.bits.xcpt_pf_if &&
!f4.io.deq.bits.xcpt_ae_if
)
when (f4_sfb_valid) {
f3_shadowed_mask := f4_sfb_mask(2*fetchWidth-1,fetchWidth).asBools
} .otherwise {
f3_shadowed_mask := VecInit(0.U(fetchWidth.W).asBools)
}
f4_ready := f4.io.enq.ready
f4.io.enq.valid := f3.io.deq.valid && !f3_clear
f4.io.enq.bits := f3_fetch_bundle
f4.io.deq.ready := fb.io.enq.ready && ftq.io.enq.ready && !f4_delay
f4_bpd_queue.io.enq.valid := f3.io.enq.valid
f4_bpd_queue.io.enq.bits := f3_bpd_resp
f4_bpd_queue.io.deq.ready := f4.io.deq.ready
fb.io.enq.valid := f4.io.deq.valid && ftq.io.enq.ready && !f4_delay
fb.io.enq.bits := f4.io.deq.bits
fb.io.enq.bits.ftq_idx := ftq.io.enq_idx
fb.io.enq.bits.sfbs := Mux(f4_sfb_valid, UIntToOH(PriorityEncoder(f4_sfbs)), 0.U(fetchWidth.W)).asBools
fb.io.enq.bits.shadowed_mask := (
Mux(f4_sfb_valid, f4_sfb_mask(fetchWidth-1,0), 0.U(fetchWidth.W)) |
f4.io.deq.bits.shadowed_mask.asUInt
).asBools
ftq.io.enq.valid := f4.io.deq.valid && fb.io.enq.ready && !f4_delay
ftq.io.enq.bits := f4.io.deq.bits
ftq.io.enq.bits.ras_top := ras.io.read_addr
val bpd_update_arbiter = Module(new Arbiter(new BranchPredictionUpdate, 2))
bpd_update_arbiter.io.in(0).valid := ftq.io.bpdupdate.valid
bpd_update_arbiter.io.in(0).bits := ftq.io.bpdupdate.bits
assert(bpd_update_arbiter.io.in(0).ready)
bpd_update_arbiter.io.in(1) <> f4_btb_corrections.io.deq
bpd.io.update := bpd_update_arbiter.io.out
bpd_update_arbiter.io.out.ready := true.B
val f4_decoded_ghist = f4.io.deq.bits.ghist.update(
f4.io.deq.bits.br_mask,
f4.io.deq.bits.cfi_idx.valid,
f4.io.deq.bits.br_mask(f4.io.deq.bits.cfi_idx.bits),
f4.io.deq.bits.cfi_idx.bits,
f4.io.deq.bits.cfi_idx.valid,
f4.io.deq.bits.pc,
f4.io.deq.bits.cfi_is_call,
f4.io.deq.bits.cfi_is_ret
)
val f4_decoded_target = Mux(f4.io.deq.bits.cfi_idx.valid && f4.io.deq.bits.cfi_is_ret && useBPD.B && useRAS.B,
ras.io.read_addr, f4.io.deq.bits.next_pc)
val f4_correct_f1_ghist = s1_ghist =/= f4_decoded_ghist && enableGHistStallRepair.B
val f4_correct_f2_ghist = s2_ghist =/= f4_decoded_ghist && enableGHistStallRepair.B
val f4_correct_f3_ghist = f3.io.deq.bits.ghist =/= f4_decoded_ghist && enableGHistStallRepair.B
when (f4.io.deq.valid) {
when (f3.io.deq.valid && f3.io.deq.bits.pc === f4_decoded_target && !f4_correct_f3_ghist) {
f4.io.enq.bits.ghist := f4_decoded_ghist
} .elsewhen (!f3.io.deq.valid && s2_valid && s2_vpc === f4_decoded_target && !f4_correct_f2_ghist) {
f3.io.enq.bits.ghist := f4_decoded_ghist
} .elsewhen (!f3.io.deq.valid && !s2_valid && s1_vpc === f4_decoded_target && !f4_correct_f1_ghist) {
s2_ghist := f4_decoded_ghist
} .elsewhen (( f3.io.deq.valid && (f3.io.deq.bits.pc =/= f4_decoded_target || f4_correct_f3_ghist)) ||
(!f3.io.deq.valid && s2_valid && (s2_vpc =/= f4_decoded_target || f4_correct_f2_ghist)) ||
(!f3.io.deq.valid && !s2_valid && s1_valid && (s1_vpc =/= f4_decoded_target || f4_correct_f1_ghist)) ||
(!f3.io.deq.valid && !s2_valid && !s1_valid)) {
f3_clear := true.B
f2_clear := true.B
f2_prev_is_half := f4.io.deq.bits.end_half.valid && !f4.io.deq.bits.cfi_idx.valid
f2_prev_half := f4.io.deq.bits.end_half.bits
f1_clear := true.B
s0_valid := !(f4.io.deq.bits.xcpt_pf_if || f4.io.deq.bits.xcpt_ae_if)
s0_vpc := f4_decoded_target
s0_is_replay := false.B
s0_ghist := f4_decoded_ghist
s0_tsrc := BSRC_4
fb.io.enq.bits.fsrc := BSRC_4
}
}
ras.io.write_valid := f4.io.deq.valid && f4.io.deq.bits.cfi_is_call && f4.io.deq.bits.cfi_idx.valid
ras.io.write_addr := bankAlign(f4.io.deq.bits.pc) + (f4.io.deq.bits.cfi_idx.bits << 1) + Mux(
f4.io.deq.bits.cfi_npc_plus4, 4.U, 2.U)
ras.io.write_idx := WrapInc(f4.io.deq.bits.ghist.ras_idx, nRasEntries)
when (ftq.io.ras_update && enableRasTopRepair.B) {
ras.io.write_valid := true.B
ras.io.write_idx := ftq.io.ras_update_idx
ras.io.write_addr := ftq.io.ras_update_pc
}
// -------------------------------------------------------
// **** To Core (F5) ****
// -------------------------------------------------------
io.cpu.fetchpacket <> fb.io.deq
ftq.io.arb_ftq_reqs := io.cpu.arb_ftq_reqs
io.cpu.rrd_ftq_resps := ftq.io.rrd_ftq_resps
io.cpu.com_pc := ftq.io.com_pc
ftq.io.deq := io.cpu.commit
ftq.io.brupdate := io.cpu.brupdate
ftq.io.redirect.valid := io.cpu.redirect_val
ftq.io.redirect.bits := io.cpu.redirect_ftq_idx
fb.io.clear := false.B
when (io.cpu.sfence.valid) {
fb.io.clear := true.B
f4_clear := true.B
f3_clear := true.B
f2_clear := true.B
f2_prev_is_half := false.B
f1_clear := true.B
s0_valid := false.B
s0_vpc := io.cpu.sfence.bits.addr
s0_is_replay := false.B
s0_is_sfence := true.B
}.elsewhen (io.cpu.redirect_flush) {
fb.io.clear := true.B
f4_clear := true.B
f3_clear := true.B
f2_clear := true.B
f2_prev_is_half := false.B
f1_clear := true.B
s0_valid := io.cpu.redirect_val
s0_vpc := io.cpu.redirect_pc
s0_ghist := io.cpu.redirect_ghist
s0_tsrc := BSRC_C
s0_is_replay := false.B
ftq.io.redirect.valid := io.cpu.redirect_val
ftq.io.redirect.bits := io.cpu.redirect_ftq_idx
}
ftq.io.debug_ftq_idx := io.cpu.debug_ftq_idx
io.cpu.debug_fetch_pc := ftq.io.debug_fetch_pc
val jump_to_reset = RegInit(true.B)
when (jump_to_reset) {
s0_valid := true.B
s0_vpc := io_reset_vector
s0_ghist := (0.U).asTypeOf(new GlobalHistory)
s0_tsrc := BSRC_C
fb.io.clear := true.B
f4_clear := true.B
f3_clear := true.B
f2_clear := true.B
f2_prev_is_half := false.B
f1_clear := true.B
jump_to_reset := false.B
}
override def toString: String =
(BoomCoreStringPrefix("====Overall Frontend Params====") + "\n"
+ icache.toString + bpd.toString)
}
File fetch-target-queue.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Fetch Target Queue (FTQ)
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Each entry in the FTQ holds the fetch address and branch prediction snapshot state.
//
// TODO:
// * reduce port counts.
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util.{Str}
import boom.v4.common._
import boom.v4.exu._
import boom.v4.util._
/**
* FTQ Parameters used in configurations
*
* @param nEntries # of entries in the FTQ
*/
case class FtqParameters(
nEntries: Int = 16
)
/**
* Bundle to add to the FTQ RAM and to be used as the pass in IO
*/
class FTQBundle(implicit p: Parameters) extends BoomBundle
with HasBoomFrontendParameters
{
// IDX of instruction that was predicted taken, if any
val cfi_idx = Valid(UInt(log2Ceil(fetchWidth).W))
// Was the CFI in this bundle found to be taken? or not
val cfi_taken = Bool()
// Was this CFI mispredicted by the branch prediction pipeline?
val cfi_mispredicted = Bool()
// What type of CFI was taken out of this bundle
val cfi_type = UInt(CFI_SZ.W)
// mask of branches which were visible in this fetch bundle
val br_mask = UInt(fetchWidth.W)
// This CFI is likely a CALL
val cfi_is_call = Bool()
// This CFI is likely a RET
val cfi_is_ret = Bool()
// Is the NPC after the CFI +4 or +2
val cfi_npc_plus4 = Bool()
// What was the top of the RAS that this bundle saw?
val ras_top = UInt(vaddrBitsExtended.W)
val ras_idx = UInt(log2Ceil(nRasEntries).W)
// Which bank did this start from?
val start_bank = UInt(1.W)
}
class FTQInfo(implicit p: Parameters) extends BoomBundle
{
val valid = Bool()
val entry = new FTQBundle
val ghist = new GlobalHistory
val pc = UInt(vaddrBitsExtended.W)
}
/**
* Queue to store the fetch PC and other relevant branch predictor signals that are inflight in the
* processor.
*
* @param num_entries # of entries in the FTQ
*/
class FetchTargetQueue(implicit p: Parameters) extends BoomModule
with HasBoomCoreParameters
with HasBoomFrontendParameters
{
val num_entries = ftqSz
private val idx_sz = log2Ceil(num_entries)
val io = IO(new BoomBundle {
// Enqueue one entry for every fetch cycle.
val enq = Flipped(Decoupled(new FetchBundle()))
// Pass to FetchBuffer (newly fetched instructions).
val enq_idx = Output(UInt(idx_sz.W))
// ROB tells us the youngest committed ftq_idx to remove from FTQ.
val deq = Flipped(Valid(UInt(idx_sz.W)))
// Give PC info to BranchUnit.
val arb_ftq_reqs = Input(Vec(3, UInt(log2Ceil(ftqSz).W)))
val rrd_ftq_resps = Output(Vec(3, new FTQInfo))
val com_pc = Output(UInt(vaddrBitsExtended.W))
// Used to regenerate PC for trace port stuff in FireSim
// Don't tape this out, this blows up the FTQ
val debug_ftq_idx = Input(Vec(coreWidth, UInt(log2Ceil(ftqSz).W)))
val debug_fetch_pc = Output(Vec(coreWidth, UInt(vaddrBitsExtended.W)))
val redirect = Input(Valid(UInt(idx_sz.W)))
val brupdate = Input(new BrUpdateInfo)
val bpdupdate = Output(Valid(new BranchPredictionUpdate))
val ras_update = Output(Bool())
val ras_update_idx = Output(UInt(log2Ceil(nRasEntries).W))
val ras_update_pc = Output(UInt(vaddrBitsExtended.W))
})
val bpd_ptr = RegInit(0.U(idx_sz.W))
val deq_ptr = RegInit(0.U(idx_sz.W))
val enq_ptr = RegInit(1.U(idx_sz.W))
val full = ((WrapInc(WrapInc(enq_ptr, num_entries), num_entries) === bpd_ptr) ||
(WrapInc(enq_ptr, num_entries) === bpd_ptr))
val pcs = Reg(Vec(num_entries, UInt(vaddrBitsExtended.W)))
val meta = SyncReadMem(num_entries, Vec(nBanks, UInt(bpdMaxMetaLength.W)))
val ram = Reg(Vec(num_entries, new FTQBundle))
val ghist = Seq.fill(2) { SyncReadMem(num_entries, new GlobalHistory) }
val lhist = if (useLHist) {
Some(SyncReadMem(num_entries, Vec(nBanks, UInt(localHistoryLength.W))))
} else {
None
}
val do_enq = io.enq.fire
// This register lets us initialize the ghist to 0
val prev_ghist = RegInit((0.U).asTypeOf(new GlobalHistory))
val prev_entry = RegInit((0.U).asTypeOf(new FTQBundle))
val prev_pc = RegInit(0.U(vaddrBitsExtended.W))
when (do_enq) {
pcs(enq_ptr) := io.enq.bits.pc
val new_entry = Wire(new FTQBundle)
new_entry.cfi_idx := io.enq.bits.cfi_idx
// Initially, if we see a CFI, it is assumed to be taken.
// Branch resolutions may change this
new_entry.cfi_taken := io.enq.bits.cfi_idx.valid
new_entry.cfi_mispredicted := false.B
new_entry.cfi_type := io.enq.bits.cfi_type
new_entry.cfi_is_call := io.enq.bits.cfi_is_call
new_entry.cfi_is_ret := io.enq.bits.cfi_is_ret
new_entry.cfi_npc_plus4 := io.enq.bits.cfi_npc_plus4
new_entry.ras_top := io.enq.bits.ras_top
new_entry.ras_idx := io.enq.bits.ghist.ras_idx
new_entry.br_mask := io.enq.bits.br_mask & io.enq.bits.mask
new_entry.start_bank := bank(io.enq.bits.pc)
val new_ghist = Mux(io.enq.bits.ghist.current_saw_branch_not_taken,
io.enq.bits.ghist,
prev_ghist.update(
prev_entry.br_mask,
prev_entry.cfi_taken,
prev_entry.br_mask(prev_entry.cfi_idx.bits),
prev_entry.cfi_idx.bits,
prev_entry.cfi_idx.valid,
prev_pc,
prev_entry.cfi_is_call,
prev_entry.cfi_is_ret
)
)
lhist.map( l => l.write(enq_ptr, io.enq.bits.lhist))
ghist.map( g => g.write(enq_ptr, new_ghist))
meta.write(enq_ptr, io.enq.bits.bpd_meta)
ram(enq_ptr) := new_entry
prev_pc := io.enq.bits.pc
prev_entry := new_entry
prev_ghist := new_ghist
enq_ptr := WrapInc(enq_ptr, num_entries)
}
io.enq_idx := enq_ptr
io.bpdupdate.valid := false.B
io.bpdupdate.bits := DontCare
when (io.deq.valid) {
deq_ptr := io.deq.bits
}
// This register avoids a spurious bpd update on the first fetch packet
val first_empty = RegInit(true.B)
// We can update the branch predictors when we know the target of the
// CFI in this fetch bundle
val ras_update = WireInit(false.B)
val ras_update_pc = WireInit(0.U(vaddrBitsExtended.W))
val ras_update_idx = WireInit(0.U(log2Ceil(nRasEntries).W))
io.ras_update := RegNext(ras_update)
io.ras_update_pc := RegNext(ras_update_pc)
io.ras_update_idx := RegNext(ras_update_idx)
val bpd_update_mispredict = RegInit(false.B)
val bpd_update_repair = RegInit(false.B)
val bpd_repair_idx = Reg(UInt(log2Ceil(ftqSz).W))
val bpd_end_idx = Reg(UInt(log2Ceil(ftqSz).W))
val bpd_repair_pc = Reg(UInt(vaddrBitsExtended.W))
val bpd_idx = Mux(io.redirect.valid, io.redirect.bits,
Mux(bpd_update_repair || bpd_update_mispredict, bpd_repair_idx, bpd_ptr))
val bpd_entry = RegNext(ram(bpd_idx))
val bpd_ghist = ghist(0).read(bpd_idx, true.B)
val bpd_lhist = if (useLHist) {
lhist.get.read(bpd_idx, true.B)
} else {
VecInit(Seq.fill(nBanks) { 0.U })
}
val bpd_meta = meta.read(bpd_idx, true.B) // TODO fix these SRAMs
val bpd_pc = RegNext(pcs(bpd_idx))
val bpd_target = RegNext(pcs(WrapInc(bpd_idx, num_entries)))
when (io.redirect.valid) {
bpd_update_mispredict := false.B
bpd_update_repair := false.B
} .elsewhen (RegNext(io.brupdate.b2.mispredict)) {
bpd_update_mispredict := true.B
bpd_repair_idx := RegNext(io.brupdate.b2.uop.ftq_idx)
bpd_end_idx := RegNext(enq_ptr)
} .elsewhen (bpd_update_mispredict) {
bpd_update_mispredict := false.B
bpd_update_repair := true.B
bpd_repair_idx := WrapInc(bpd_repair_idx, num_entries)
} .elsewhen (bpd_update_repair && RegNext(bpd_update_mispredict)) {
bpd_repair_pc := bpd_pc
bpd_repair_idx := WrapInc(bpd_repair_idx, num_entries)
} .elsewhen (bpd_update_repair) {
bpd_repair_idx := WrapInc(bpd_repair_idx, num_entries)
when (WrapInc(bpd_repair_idx, num_entries) === bpd_end_idx ||
bpd_pc === bpd_repair_pc) {
bpd_update_repair := false.B
}
}
val do_commit_update = (!bpd_update_mispredict &&
!bpd_update_repair &&
bpd_ptr =/= deq_ptr &&
enq_ptr =/= WrapInc(bpd_ptr, num_entries) &&
!io.brupdate.b2.mispredict &&
!io.redirect.valid && !RegNext(io.redirect.valid))
val do_mispredict_update = bpd_update_mispredict
val do_repair_update = bpd_update_repair
when (RegNext(do_commit_update || do_repair_update || do_mispredict_update)) {
val cfi_idx = bpd_entry.cfi_idx.bits
val valid_repair = bpd_pc =/= bpd_repair_pc
io.bpdupdate.valid := (!first_empty &&
(bpd_entry.cfi_idx.valid || bpd_entry.br_mask =/= 0.U) &&
!(RegNext(do_repair_update) && !valid_repair))
io.bpdupdate.bits.is_mispredict_update := RegNext(do_mispredict_update)
io.bpdupdate.bits.is_repair_update := RegNext(do_repair_update)
io.bpdupdate.bits.pc := bpd_pc
io.bpdupdate.bits.btb_mispredicts := 0.U
io.bpdupdate.bits.br_mask := Mux(bpd_entry.cfi_idx.valid,
MaskLower(UIntToOH(cfi_idx)) & bpd_entry.br_mask, bpd_entry.br_mask)
io.bpdupdate.bits.cfi_idx := bpd_entry.cfi_idx
io.bpdupdate.bits.cfi_mispredicted := bpd_entry.cfi_mispredicted
io.bpdupdate.bits.cfi_taken := bpd_entry.cfi_taken
io.bpdupdate.bits.target := bpd_target
io.bpdupdate.bits.cfi_is_br := bpd_entry.br_mask(cfi_idx)
io.bpdupdate.bits.cfi_is_jal := bpd_entry.cfi_type === CFI_JAL || bpd_entry.cfi_type === CFI_JALR
io.bpdupdate.bits.ghist := bpd_ghist
io.bpdupdate.bits.lhist := bpd_lhist
io.bpdupdate.bits.meta := bpd_meta
first_empty := false.B
}
when (do_commit_update) {
bpd_ptr := WrapInc(bpd_ptr, num_entries)
}
io.enq.ready := RegNext(!full || do_commit_update)
val redirect_idx = io.redirect.bits
val redirect_entry = ram(redirect_idx)
val redirect_new_entry = WireInit(redirect_entry)
when (io.redirect.valid) {
enq_ptr := WrapInc(io.redirect.bits, num_entries)
when (io.brupdate.b2.mispredict) {
val new_cfi_idx = (io.brupdate.b2.uop.pc_lob ^
Mux(redirect_entry.start_bank === 1.U, 1.U << log2Ceil(bankBytes), 0.U))(log2Ceil(fetchWidth), 1)
redirect_new_entry.cfi_idx.valid := true.B
redirect_new_entry.cfi_idx.bits := new_cfi_idx
redirect_new_entry.cfi_mispredicted := true.B
redirect_new_entry.cfi_taken := io.brupdate.b2.taken
redirect_new_entry.cfi_is_call := redirect_entry.cfi_is_call && redirect_entry.cfi_idx.bits === new_cfi_idx
redirect_new_entry.cfi_is_ret := redirect_entry.cfi_is_ret && redirect_entry.cfi_idx.bits === new_cfi_idx
}
ras_update := true.B
ras_update_pc := redirect_entry.ras_top
ras_update_idx := redirect_entry.ras_idx
} .elsewhen (RegNext(io.redirect.valid)) {
prev_entry := RegNext(redirect_new_entry)
prev_ghist := bpd_ghist
prev_pc := bpd_pc
ram(RegNext(io.redirect.bits)) := RegNext(redirect_new_entry)
}
//-------------------------------------------------------------
// **** Core Read PCs ****
//-------------------------------------------------------------
for (i <- 0 until 3) {
val idx = Mux(reset.asBool, 0.U(log2Ceil(ftqSz).W), io.arb_ftq_reqs(i))
val is_enq = (idx === enq_ptr) && io.enq.fire
val get_entry = ram(idx)
io.rrd_ftq_resps(i).entry := RegNext(get_entry)
if (i == 0) {
io.rrd_ftq_resps(i).ghist := ghist(1).read(idx, true.B)
} else {
io.rrd_ftq_resps(i).ghist := DontCare
}
io.rrd_ftq_resps(i).pc := RegNext(Mux(is_enq, io.enq.bits.pc, pcs(idx)))
io.rrd_ftq_resps(i).valid := RegNext(idx =/= enq_ptr || is_enq)
}
io.com_pc := RegNext(pcs(Mux(io.deq.valid, io.deq.bits, deq_ptr)))
for (w <- 0 until coreWidth) {
io.debug_fetch_pc(w) := RegNext(pcs(io.debug_ftq_idx(w)))
}
}
| module FetchTargetQueue( // @[fetch-target-queue.scala:82:7]
input clock, // @[fetch-target-queue.scala:82:7]
input reset, // @[fetch-target-queue.scala:82:7]
output io_enq_ready, // @[fetch-target-queue.scala:89:14]
input io_enq_valid, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pc, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_next_pc, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_next_fetch, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_edge_inst_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_edge_inst_1, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_0, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_1, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_2, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_3, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_4, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_5, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_6, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_insts_7, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_0, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_1, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_2, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_3, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_4, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_5, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_6, // @[fetch-target-queue.scala:89:14]
input [31:0] io_enq_bits_exp_insts_7, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_0, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_1, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_2, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_3, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_4, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_5, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_6, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_pcs_7, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_1, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_2, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_3, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_4, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_5, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_6, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_sfbs_7, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_0, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_1, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_2, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_3, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_4, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_5, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_6, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_sfb_masks_7, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_0, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_1, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_2, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_3, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_4, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_5, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_6, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_sfb_dests_7, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_1, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_2, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_3, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_4, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_5, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_6, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowable_mask_7, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_1, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_2, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_3, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_4, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_5, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_6, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_shadowed_mask_7, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_cfi_idx_valid, // @[fetch-target-queue.scala:89:14]
input [2:0] io_enq_bits_cfi_idx_bits, // @[fetch-target-queue.scala:89:14]
input [2:0] io_enq_bits_cfi_type, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_cfi_is_call, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_cfi_is_ret, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_cfi_npc_plus4, // @[fetch-target-queue.scala:89:14]
input [39:0] io_enq_bits_ras_top, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_ftq_idx, // @[fetch-target-queue.scala:89:14]
input [7:0] io_enq_bits_mask, // @[fetch-target-queue.scala:89:14]
input [7:0] io_enq_bits_br_mask, // @[fetch-target-queue.scala:89:14]
input [63:0] io_enq_bits_ghist_old_history, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_ghist_current_saw_branch_not_taken, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_ghist_new_saw_branch_not_taken, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_ghist_new_saw_branch_taken, // @[fetch-target-queue.scala:89:14]
input [4:0] io_enq_bits_ghist_ras_idx, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_lhist_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_lhist_1, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_xcpt_pf_if, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_xcpt_ae_if, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_1, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_2, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_3, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_4, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_5, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_6, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_debug_if_oh_7, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_0, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_1, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_2, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_3, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_4, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_5, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_6, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_bp_xcpt_if_oh_7, // @[fetch-target-queue.scala:89:14]
input io_enq_bits_end_half_valid, // @[fetch-target-queue.scala:89:14]
input [15:0] io_enq_bits_end_half_bits, // @[fetch-target-queue.scala:89:14]
input [119:0] io_enq_bits_bpd_meta_0, // @[fetch-target-queue.scala:89:14]
input [119:0] io_enq_bits_bpd_meta_1, // @[fetch-target-queue.scala:89:14]
input [2:0] io_enq_bits_fsrc, // @[fetch-target-queue.scala:89:14]
input [2:0] io_enq_bits_tsrc, // @[fetch-target-queue.scala:89:14]
output [4:0] io_enq_idx, // @[fetch-target-queue.scala:89:14]
input io_deq_valid, // @[fetch-target-queue.scala:89:14]
input [4:0] io_deq_bits, // @[fetch-target-queue.scala:89:14]
input [4:0] io_arb_ftq_reqs_0, // @[fetch-target-queue.scala:89:14]
input [4:0] io_arb_ftq_reqs_1, // @[fetch-target-queue.scala:89:14]
input [4:0] io_arb_ftq_reqs_2, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_valid, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_cfi_idx_valid, // @[fetch-target-queue.scala:89:14]
output [2:0] io_rrd_ftq_resps_0_entry_cfi_idx_bits, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_cfi_taken, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_cfi_mispredicted, // @[fetch-target-queue.scala:89:14]
output [2:0] io_rrd_ftq_resps_0_entry_cfi_type, // @[fetch-target-queue.scala:89:14]
output [7:0] io_rrd_ftq_resps_0_entry_br_mask, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_cfi_is_call, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_cfi_is_ret, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_cfi_npc_plus4, // @[fetch-target-queue.scala:89:14]
output [39:0] io_rrd_ftq_resps_0_entry_ras_top, // @[fetch-target-queue.scala:89:14]
output [4:0] io_rrd_ftq_resps_0_entry_ras_idx, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_entry_start_bank, // @[fetch-target-queue.scala:89:14]
output [63:0] io_rrd_ftq_resps_0_ghist_old_history, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_ghist_current_saw_branch_not_taken, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_ghist_new_saw_branch_not_taken, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_0_ghist_new_saw_branch_taken, // @[fetch-target-queue.scala:89:14]
output [4:0] io_rrd_ftq_resps_0_ghist_ras_idx, // @[fetch-target-queue.scala:89:14]
output [39:0] io_rrd_ftq_resps_0_pc, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_valid, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_cfi_idx_valid, // @[fetch-target-queue.scala:89:14]
output [2:0] io_rrd_ftq_resps_1_entry_cfi_idx_bits, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_cfi_taken, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_cfi_mispredicted, // @[fetch-target-queue.scala:89:14]
output [2:0] io_rrd_ftq_resps_1_entry_cfi_type, // @[fetch-target-queue.scala:89:14]
output [7:0] io_rrd_ftq_resps_1_entry_br_mask, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_cfi_is_call, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_cfi_is_ret, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_cfi_npc_plus4, // @[fetch-target-queue.scala:89:14]
output [39:0] io_rrd_ftq_resps_1_entry_ras_top, // @[fetch-target-queue.scala:89:14]
output [4:0] io_rrd_ftq_resps_1_entry_ras_idx, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_1_entry_start_bank, // @[fetch-target-queue.scala:89:14]
output [39:0] io_rrd_ftq_resps_1_pc, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_valid, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_cfi_idx_valid, // @[fetch-target-queue.scala:89:14]
output [2:0] io_rrd_ftq_resps_2_entry_cfi_idx_bits, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_cfi_taken, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_cfi_mispredicted, // @[fetch-target-queue.scala:89:14]
output [2:0] io_rrd_ftq_resps_2_entry_cfi_type, // @[fetch-target-queue.scala:89:14]
output [7:0] io_rrd_ftq_resps_2_entry_br_mask, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_cfi_is_call, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_cfi_is_ret, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_cfi_npc_plus4, // @[fetch-target-queue.scala:89:14]
output [39:0] io_rrd_ftq_resps_2_entry_ras_top, // @[fetch-target-queue.scala:89:14]
output [4:0] io_rrd_ftq_resps_2_entry_ras_idx, // @[fetch-target-queue.scala:89:14]
output io_rrd_ftq_resps_2_entry_start_bank, // @[fetch-target-queue.scala:89:14]
output [39:0] io_rrd_ftq_resps_2_pc, // @[fetch-target-queue.scala:89:14]
output [39:0] io_com_pc, // @[fetch-target-queue.scala:89:14]
output [39:0] io_debug_fetch_pc_0, // @[fetch-target-queue.scala:89:14]
output [39:0] io_debug_fetch_pc_1, // @[fetch-target-queue.scala:89:14]
output [39:0] io_debug_fetch_pc_2, // @[fetch-target-queue.scala:89:14]
input io_redirect_valid, // @[fetch-target-queue.scala:89:14]
input [4:0] io_redirect_bits, // @[fetch-target-queue.scala:89:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[fetch-target-queue.scala:89:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[fetch-target-queue.scala:89:14]
input [31:0] io_brupdate_b2_uop_inst, // @[fetch-target-queue.scala:89:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_rvc, // @[fetch-target-queue.scala:89:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iq_type_0, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iq_type_1, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iq_type_2, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iq_type_3, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_0, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_1, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_2, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_3, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_4, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_5, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_6, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_7, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_8, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fu_code_9, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iw_issued, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_dis_col_sel, // @[fetch-target-queue.scala:89:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[fetch-target-queue.scala:89:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[fetch-target-queue.scala:89:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_sfb, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_fence, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_fencei, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_sfence, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_amo, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_eret, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_rocc, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_mov, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_edge_inst, // @[fetch-target-queue.scala:89:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_taken, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_imm_rename, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[fetch-target-queue.scala:89:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[fetch-target-queue.scala:89:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[fetch-target-queue.scala:89:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[fetch-target-queue.scala:89:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[fetch-target-queue.scala:89:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[fetch-target-queue.scala:89:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_prs1_busy, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_prs2_busy, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_prs3_busy, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_ppred_busy, // @[fetch-target-queue.scala:89:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_exception, // @[fetch-target-queue.scala:89:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_mem_signed, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_uses_ldq, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_uses_stq, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_is_unique, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_flush_on_commit, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[fetch-target-queue.scala:89:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[fetch-target-queue.scala:89:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[fetch-target-queue.scala:89:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[fetch-target-queue.scala:89:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_frs3_en, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fcn_dw, // @[fetch-target-queue.scala:89:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_fp_val, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_bp_debug_if, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_mispredict, // @[fetch-target-queue.scala:89:14]
input io_brupdate_b2_taken, // @[fetch-target-queue.scala:89:14]
input [2:0] io_brupdate_b2_cfi_type, // @[fetch-target-queue.scala:89:14]
input [1:0] io_brupdate_b2_pc_sel, // @[fetch-target-queue.scala:89:14]
input [39:0] io_brupdate_b2_jalr_target, // @[fetch-target-queue.scala:89:14]
input [20:0] io_brupdate_b2_target_offset, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_valid, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_is_mispredict_update, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_is_repair_update, // @[fetch-target-queue.scala:89:14]
output [39:0] io_bpdupdate_bits_pc, // @[fetch-target-queue.scala:89:14]
output [7:0] io_bpdupdate_bits_br_mask, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_cfi_idx_valid, // @[fetch-target-queue.scala:89:14]
output [2:0] io_bpdupdate_bits_cfi_idx_bits, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_cfi_taken, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_cfi_mispredicted, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_cfi_is_br, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_cfi_is_jal, // @[fetch-target-queue.scala:89:14]
output [63:0] io_bpdupdate_bits_ghist_old_history, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_ghist_current_saw_branch_not_taken, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_ghist_new_saw_branch_not_taken, // @[fetch-target-queue.scala:89:14]
output io_bpdupdate_bits_ghist_new_saw_branch_taken, // @[fetch-target-queue.scala:89:14]
output [4:0] io_bpdupdate_bits_ghist_ras_idx, // @[fetch-target-queue.scala:89:14]
output [39:0] io_bpdupdate_bits_target, // @[fetch-target-queue.scala:89:14]
output [119:0] io_bpdupdate_bits_meta_0, // @[fetch-target-queue.scala:89:14]
output [119:0] io_bpdupdate_bits_meta_1, // @[fetch-target-queue.scala:89:14]
output io_ras_update, // @[fetch-target-queue.scala:89:14]
output [4:0] io_ras_update_idx, // @[fetch-target-queue.scala:89:14]
output [39:0] io_ras_update_pc // @[fetch-target-queue.scala:89:14]
);
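  // Read-port data wires for the ghist and bpd meta memories.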
wire [71:0] _ghist_1_R0_data; // @[fetch-target-queue.scala:131:43]
wire [71:0] _ghist_0_R0_data; // @[fetch-target-queue.scala:131:43]
wire [239:0] _meta_R0_data; // @[fetch-target-queue.scala:129:29]
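  // Alias wires for the module inputs; each simply forwards the corresponding port.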
wire io_enq_valid_0 = io_enq_valid; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pc_0 = io_enq_bits_pc; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_next_pc_0 = io_enq_bits_next_pc; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_next_fetch_0 = io_enq_bits_next_fetch; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_edge_inst_0_0 = io_enq_bits_edge_inst_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_edge_inst_1_0 = io_enq_bits_edge_inst_1; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_0_0 = io_enq_bits_insts_0; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_1_0 = io_enq_bits_insts_1; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_2_0 = io_enq_bits_insts_2; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_3_0 = io_enq_bits_insts_3; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_4_0 = io_enq_bits_insts_4; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_5_0 = io_enq_bits_insts_5; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_6_0 = io_enq_bits_insts_6; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_insts_7_0 = io_enq_bits_insts_7; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_0_0 = io_enq_bits_exp_insts_0; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_1_0 = io_enq_bits_exp_insts_1; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_2_0 = io_enq_bits_exp_insts_2; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_3_0 = io_enq_bits_exp_insts_3; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_4_0 = io_enq_bits_exp_insts_4; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_5_0 = io_enq_bits_exp_insts_5; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_6_0 = io_enq_bits_exp_insts_6; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_enq_bits_exp_insts_7_0 = io_enq_bits_exp_insts_7; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_0_0 = io_enq_bits_pcs_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_1_0 = io_enq_bits_pcs_1; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_2_0 = io_enq_bits_pcs_2; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_3_0 = io_enq_bits_pcs_3; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_4_0 = io_enq_bits_pcs_4; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_5_0 = io_enq_bits_pcs_5; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_6_0 = io_enq_bits_pcs_6; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_pcs_7_0 = io_enq_bits_pcs_7; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_0_0 = io_enq_bits_sfbs_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_1_0 = io_enq_bits_sfbs_1; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_2_0 = io_enq_bits_sfbs_2; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_3_0 = io_enq_bits_sfbs_3; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_4_0 = io_enq_bits_sfbs_4; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_5_0 = io_enq_bits_sfbs_5; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_6_0 = io_enq_bits_sfbs_6; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_sfbs_7_0 = io_enq_bits_sfbs_7; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_0_0 = io_enq_bits_sfb_masks_0; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_1_0 = io_enq_bits_sfb_masks_1; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_2_0 = io_enq_bits_sfb_masks_2; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_3_0 = io_enq_bits_sfb_masks_3; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_4_0 = io_enq_bits_sfb_masks_4; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_5_0 = io_enq_bits_sfb_masks_5; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_6_0 = io_enq_bits_sfb_masks_6; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_sfb_masks_7_0 = io_enq_bits_sfb_masks_7; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_0_0 = io_enq_bits_sfb_dests_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_1_0 = io_enq_bits_sfb_dests_1; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_2_0 = io_enq_bits_sfb_dests_2; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_3_0 = io_enq_bits_sfb_dests_3; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_4_0 = io_enq_bits_sfb_dests_4; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_5_0 = io_enq_bits_sfb_dests_5; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_6_0 = io_enq_bits_sfb_dests_6; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_sfb_dests_7_0 = io_enq_bits_sfb_dests_7; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_0_0 = io_enq_bits_shadowable_mask_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_1_0 = io_enq_bits_shadowable_mask_1; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_2_0 = io_enq_bits_shadowable_mask_2; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_3_0 = io_enq_bits_shadowable_mask_3; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_4_0 = io_enq_bits_shadowable_mask_4; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_5_0 = io_enq_bits_shadowable_mask_5; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_6_0 = io_enq_bits_shadowable_mask_6; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowable_mask_7_0 = io_enq_bits_shadowable_mask_7; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_0_0 = io_enq_bits_shadowed_mask_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_1_0 = io_enq_bits_shadowed_mask_1; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_2_0 = io_enq_bits_shadowed_mask_2; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_3_0 = io_enq_bits_shadowed_mask_3; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_4_0 = io_enq_bits_shadowed_mask_4; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_5_0 = io_enq_bits_shadowed_mask_5; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_6_0 = io_enq_bits_shadowed_mask_6; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_shadowed_mask_7_0 = io_enq_bits_shadowed_mask_7; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_cfi_idx_valid_0 = io_enq_bits_cfi_idx_valid; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_enq_bits_cfi_idx_bits_0 = io_enq_bits_cfi_idx_bits; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_enq_bits_cfi_type_0 = io_enq_bits_cfi_type; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_cfi_is_call_0 = io_enq_bits_cfi_is_call; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_cfi_is_ret_0 = io_enq_bits_cfi_is_ret; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_cfi_npc_plus4_0 = io_enq_bits_cfi_npc_plus4; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_enq_bits_ras_top_0 = io_enq_bits_ras_top; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_ftq_idx_0 = io_enq_bits_ftq_idx; // @[fetch-target-queue.scala:82:7]
wire [7:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[fetch-target-queue.scala:82:7]
wire [7:0] io_enq_bits_br_mask_0 = io_enq_bits_br_mask; // @[fetch-target-queue.scala:82:7]
wire [63:0] io_enq_bits_ghist_old_history_0 = io_enq_bits_ghist_old_history; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_ghist_current_saw_branch_not_taken_0 = io_enq_bits_ghist_current_saw_branch_not_taken; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_ghist_new_saw_branch_not_taken_0 = io_enq_bits_ghist_new_saw_branch_not_taken; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_ghist_new_saw_branch_taken_0 = io_enq_bits_ghist_new_saw_branch_taken; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_bits_ghist_ras_idx_0 = io_enq_bits_ghist_ras_idx; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_lhist_0_0 = io_enq_bits_lhist_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_lhist_1_0 = io_enq_bits_lhist_1; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_xcpt_pf_if_0 = io_enq_bits_xcpt_pf_if; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_xcpt_ae_if_0 = io_enq_bits_xcpt_ae_if; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_0_0 = io_enq_bits_bp_debug_if_oh_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_1_0 = io_enq_bits_bp_debug_if_oh_1; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_2_0 = io_enq_bits_bp_debug_if_oh_2; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_3_0 = io_enq_bits_bp_debug_if_oh_3; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_4_0 = io_enq_bits_bp_debug_if_oh_4; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_5_0 = io_enq_bits_bp_debug_if_oh_5; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_6_0 = io_enq_bits_bp_debug_if_oh_6; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_debug_if_oh_7_0 = io_enq_bits_bp_debug_if_oh_7; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_0_0 = io_enq_bits_bp_xcpt_if_oh_0; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_1_0 = io_enq_bits_bp_xcpt_if_oh_1; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_2_0 = io_enq_bits_bp_xcpt_if_oh_2; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_3_0 = io_enq_bits_bp_xcpt_if_oh_3; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_4_0 = io_enq_bits_bp_xcpt_if_oh_4; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_5_0 = io_enq_bits_bp_xcpt_if_oh_5; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_6_0 = io_enq_bits_bp_xcpt_if_oh_6; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_bp_xcpt_if_oh_7_0 = io_enq_bits_bp_xcpt_if_oh_7; // @[fetch-target-queue.scala:82:7]
wire io_enq_bits_end_half_valid_0 = io_enq_bits_end_half_valid; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_enq_bits_end_half_bits_0 = io_enq_bits_end_half_bits; // @[fetch-target-queue.scala:82:7]
wire [119:0] io_enq_bits_bpd_meta_0_0 = io_enq_bits_bpd_meta_0; // @[fetch-target-queue.scala:82:7]
wire [119:0] io_enq_bits_bpd_meta_1_0 = io_enq_bits_bpd_meta_1; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_enq_bits_fsrc_0 = io_enq_bits_fsrc; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_enq_bits_tsrc_0 = io_enq_bits_tsrc; // @[fetch-target-queue.scala:82:7]
wire io_deq_valid_0 = io_deq_valid; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_deq_bits_0 = io_deq_bits; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_arb_ftq_reqs_0_0 = io_arb_ftq_reqs_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_arb_ftq_reqs_1_0 = io_arb_ftq_reqs_1; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_arb_ftq_reqs_2_0 = io_arb_ftq_reqs_2; // @[fetch-target-queue.scala:82:7]
wire io_redirect_valid_0 = io_redirect_valid; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_redirect_bits_0 = io_redirect_bits; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[fetch-target-queue.scala:82:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[fetch-target-queue.scala:82:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[fetch-target-queue.scala:82:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[fetch-target-queue.scala:82:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[fetch-target-queue.scala:82:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[fetch-target-queue.scala:82:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[fetch-target-queue.scala:82:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[fetch-target-queue.scala:82:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[fetch-target-queue.scala:82:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[fetch-target-queue.scala:82:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[fetch-target-queue.scala:82:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[fetch-target-queue.scala:82:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[fetch-target-queue.scala:82:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[fetch-target-queue.scala:82:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[fetch-target-queue.scala:82:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[fetch-target-queue.scala:82:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[fetch-target-queue.scala:82:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[fetch-target-queue.scala:82:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[fetch-target-queue.scala:82:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[fetch-target-queue.scala:82:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[fetch-target-queue.scala:82:7]
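  // Constants and reset-derived values; several feed fields that are tied off or unused
  // in this configuration.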
wire _idx_T = reset; // @[fetch-target-queue.scala:332:25]
wire _idx_T_1 = reset; // @[fetch-target-queue.scala:332:25]
wire _idx_T_2 = reset; // @[fetch-target-queue.scala:332:25]
wire [2:0] _prev_entry_WIRE_cfi_idx_bits = 3'h0; // @[fetch-target-queue.scala:143:42]
wire [2:0] _prev_entry_WIRE_cfi_type = 3'h0; // @[fetch-target-queue.scala:143:42]
wire [39:0] _prev_entry_WIRE_ras_top = 40'h0; // @[fetch-target-queue.scala:143:42]
wire [7:0] _new_ghist_not_taken_branches_T_19 = 8'hFF; // @[frontend.scala:78:45]
wire [3:0] _new_cfi_idx_T_1 = 4'h8; // @[fetch-target-queue.scala:306:50]
wire [7:0] io_bpdupdate_bits_btb_mispredicts = 8'h0; // @[fetch-target-queue.scala:82:7]
wire [7:0] _prev_entry_WIRE_br_mask = 8'h0; // @[fetch-target-queue.scala:143:42]
wire [4:0] io_rrd_ftq_resps_1_ghist_ras_idx = 5'h0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_rrd_ftq_resps_2_ghist_ras_idx = 5'h0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_debug_ftq_idx_0 = 5'h0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_debug_ftq_idx_1 = 5'h0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_debug_ftq_idx_2 = 5'h0; // @[fetch-target-queue.scala:82:7]
wire [4:0] _prev_ghist_WIRE_ras_idx = 5'h0; // @[fetch-target-queue.scala:142:42]
wire [4:0] _prev_entry_WIRE_ras_idx = 5'h0; // @[fetch-target-queue.scala:143:42]
wire io_rrd_ftq_resps_1_ghist_current_saw_branch_not_taken = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_ghist_new_saw_branch_not_taken = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_ghist_new_saw_branch_taken = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_ghist_current_saw_branch_not_taken = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_ghist_new_saw_branch_not_taken = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_ghist_new_saw_branch_taken = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_cfi_is_jalr = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_lhist_0 = 1'h0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_lhist_1 = 1'h0; // @[fetch-target-queue.scala:82:7]
wire _prev_ghist_WIRE_current_saw_branch_not_taken = 1'h0; // @[fetch-target-queue.scala:142:42]
wire _prev_ghist_WIRE_new_saw_branch_not_taken = 1'h0; // @[fetch-target-queue.scala:142:42]
wire _prev_ghist_WIRE_new_saw_branch_taken = 1'h0; // @[fetch-target-queue.scala:142:42]
wire _prev_entry_WIRE_cfi_idx_valid = 1'h0; // @[fetch-target-queue.scala:143:42]
wire _prev_entry_WIRE_cfi_taken = 1'h0; // @[fetch-target-queue.scala:143:42]
wire _prev_entry_WIRE_cfi_mispredicted = 1'h0; // @[fetch-target-queue.scala:143:42]
wire _prev_entry_WIRE_cfi_is_call = 1'h0; // @[fetch-target-queue.scala:143:42]
wire _prev_entry_WIRE_cfi_is_ret = 1'h0; // @[fetch-target-queue.scala:143:42]
wire _prev_entry_WIRE_cfi_npc_plus4 = 1'h0; // @[fetch-target-queue.scala:143:42]
wire _prev_entry_WIRE_start_bank = 1'h0; // @[fetch-target-queue.scala:143:42]
wire new_entry_cfi_mispredicted = 1'h0; // @[fetch-target-queue.scala:149:25]
wire new_ghist_new_history_current_saw_branch_not_taken = 1'h0; // @[frontend.scala:74:27]
wire bpd_lhist_0 = 1'h0; // @[fetch-target-queue.scala:226:12]
wire bpd_lhist_1 = 1'h0; // @[fetch-target-queue.scala:226:12]
wire [63:0] io_rrd_ftq_resps_1_ghist_old_history = 64'h0; // @[fetch-target-queue.scala:82:7]
wire [63:0] io_rrd_ftq_resps_2_ghist_old_history = 64'h0; // @[fetch-target-queue.scala:82:7]
wire [63:0] _prev_ghist_WIRE_old_history = 64'h0; // @[fetch-target-queue.scala:142:42]
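  // new_entry / new_ghist fields taken directly from the enqueue payload.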
wire new_entry_cfi_idx_valid = io_enq_bits_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire new_entry_cfi_taken = io_enq_bits_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire [2:0] new_entry_cfi_idx_bits = io_enq_bits_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire [2:0] new_entry_cfi_type = io_enq_bits_cfi_type_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire new_entry_cfi_is_call = io_enq_bits_cfi_is_call_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire new_entry_cfi_is_ret = io_enq_bits_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire new_entry_cfi_npc_plus4 = io_enq_bits_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire [39:0] new_entry_ras_top = io_enq_bits_ras_top_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire new_ghist_current_saw_branch_not_taken = io_enq_bits_ghist_current_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7, :165:24]
wire [4:0] new_entry_ras_idx = io_enq_bits_ghist_ras_idx_0; // @[fetch-target-queue.scala:82:7, :149:25]
wire ras_update = io_redirect_valid_0; // @[fetch-target-queue.scala:82:7, :206:28]
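  // Forward declarations; the drivers for these signals appear later in the module.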
wire [7:0] _io_bpdupdate_bits_br_mask_T_17; // @[fetch-target-queue.scala:276:37]
wire _io_bpdupdate_bits_cfi_is_br_T_1; // @[fetch-target-queue.scala:282:54]
wire _io_bpdupdate_bits_cfi_is_jal_T_2; // @[fetch-target-queue.scala:283:68]
wire io_enq_ready_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_rrd_ftq_resps_0_entry_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_rrd_ftq_resps_0_entry_cfi_type_0; // @[fetch-target-queue.scala:82:7]
wire [7:0] io_rrd_ftq_resps_0_entry_br_mask_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_cfi_is_call_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_rrd_ftq_resps_0_entry_ras_top_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_rrd_ftq_resps_0_entry_ras_idx_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_entry_start_bank_0; // @[fetch-target-queue.scala:82:7]
wire [63:0] io_rrd_ftq_resps_0_ghist_old_history_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_ghist_current_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_ghist_new_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_ghist_new_saw_branch_taken_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_rrd_ftq_resps_0_ghist_ras_idx_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_0_valid_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_rrd_ftq_resps_0_pc_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_rrd_ftq_resps_1_entry_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_rrd_ftq_resps_1_entry_cfi_type_0; // @[fetch-target-queue.scala:82:7]
wire [7:0] io_rrd_ftq_resps_1_entry_br_mask_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_cfi_is_call_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_rrd_ftq_resps_1_entry_ras_top_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_rrd_ftq_resps_1_entry_ras_idx_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_entry_start_bank_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_1_valid_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_rrd_ftq_resps_1_pc_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_rrd_ftq_resps_2_entry_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_rrd_ftq_resps_2_entry_cfi_type_0; // @[fetch-target-queue.scala:82:7]
wire [7:0] io_rrd_ftq_resps_2_entry_br_mask_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_cfi_is_call_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_rrd_ftq_resps_2_entry_ras_top_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_rrd_ftq_resps_2_entry_ras_idx_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_entry_start_bank_0; // @[fetch-target-queue.scala:82:7]
wire io_rrd_ftq_resps_2_valid_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_rrd_ftq_resps_2_pc_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_debug_fetch_pc_0_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_debug_fetch_pc_1_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_debug_fetch_pc_2_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
wire [2:0] io_bpdupdate_bits_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
wire [63:0] io_bpdupdate_bits_ghist_old_history_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_ghist_current_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_ghist_new_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_ghist_new_saw_branch_taken_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_bpdupdate_bits_ghist_ras_idx_0; // @[fetch-target-queue.scala:82:7]
wire [119:0] io_bpdupdate_bits_meta_0_0; // @[fetch-target-queue.scala:82:7]
wire [119:0] io_bpdupdate_bits_meta_1_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_is_mispredict_update_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_is_repair_update_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_bpdupdate_bits_pc_0; // @[fetch-target-queue.scala:82:7]
wire [7:0] io_bpdupdate_bits_br_mask_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_cfi_is_br_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_bits_cfi_is_jal_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_bpdupdate_bits_target_0; // @[fetch-target-queue.scala:82:7]
wire io_bpdupdate_valid_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_enq_idx_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_com_pc_0; // @[fetch-target-queue.scala:82:7]
wire io_ras_update_0; // @[fetch-target-queue.scala:82:7]
wire [4:0] io_ras_update_idx_0; // @[fetch-target-queue.scala:82:7]
wire [39:0] io_ras_update_pc_0; // @[fetch-target-queue.scala:82:7]
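  // FTQ queue pointers: bpd_ptr appears to track the oldest entry not yet sent to the
  // branch predictor, deq_ptr the commit/dequeue point, and enq_ptr the next slot to
  // allocate. The logic below wrap-increments enq_ptr and raises `full` when the next
  // one or two slots would collide with bpd_ptr.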
reg [4:0] bpd_ptr; // @[fetch-target-queue.scala:120:27]
reg [4:0] deq_ptr; // @[fetch-target-queue.scala:121:27]
reg [4:0] enq_ptr; // @[fetch-target-queue.scala:122:27]
assign io_enq_idx_0 = enq_ptr; // @[fetch-target-queue.scala:82:7, :122:27]
wire [5:0] _GEN = {1'h0, enq_ptr} + 6'h1; // @[util.scala:211:14]
wire [5:0] _full_T; // @[util.scala:211:14]
assign _full_T = _GEN; // @[util.scala:211:14]
wire [5:0] _full_T_7; // @[util.scala:211:14]
assign _full_T_7 = _GEN; // @[util.scala:211:14]
wire [5:0] _enq_ptr_T; // @[util.scala:211:14]
assign _enq_ptr_T = _GEN; // @[util.scala:211:14]
wire [4:0] _full_T_1 = _full_T[4:0]; // @[util.scala:211:14]
wire [4:0] _full_T_2 = _full_T_1; // @[util.scala:211:{14,20}]
wire [5:0] _full_T_3 = {1'h0, _full_T_2} + 6'h1; // @[util.scala:211:{14,20}]
wire [4:0] _full_T_4 = _full_T_3[4:0]; // @[util.scala:211:14]
wire [4:0] _full_T_5 = _full_T_4; // @[util.scala:211:{14,20}]
wire _full_T_6 = _full_T_5 == bpd_ptr; // @[util.scala:211:20]
wire [4:0] _full_T_8 = _full_T_7[4:0]; // @[util.scala:211:14]
wire [4:0] _full_T_9 = _full_T_8; // @[util.scala:211:{14,20}]
wire _full_T_10 = _full_T_9 == bpd_ptr; // @[util.scala:211:20]
wire full = _full_T_6 | _full_T_10; // @[fetch-target-queue.scala:124:{68,81}, :125:46]
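  // Per-entry fetch PCs for all 32 FTQ slots, kept in flops (pcs_0 .. pcs_31).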
reg [39:0] pcs_0; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_1; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_2; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_3; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_4; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_5; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_6; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_7; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_8; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_9; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_10; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_11; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_12; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_13; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_14; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_15; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_16; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_17; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_18; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_19; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_20; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_21; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_22; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_23; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_24; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_25; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_26; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_27; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_28; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_29; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_30; // @[fetch-target-queue.scala:128:21]
reg [39:0] pcs_31; // @[fetch-target-queue.scala:128:21]
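  // Branch-predictor metadata read from the `meta` memory: the 240-bit read word is
  // split into two 120-bit fields (meta_0 / meta_1) for the bpd update.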
assign io_bpdupdate_bits_meta_0_0 = _meta_R0_data[119:0]; // @[fetch-target-queue.scala:82:7, :129:29]
assign io_bpdupdate_bits_meta_1_0 = _meta_R0_data[239:120]; // @[fetch-target-queue.scala:82:7, :129:29]
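  // FTQ entry storage: each of the 32 slots keeps its CFI index/taken/mispredicted flags,
  // CFI type, branch mask, call/ret flags, npc_plus4, a RAS snapshot (ras_top, ras_idx),
  // and the starting bank, all as registers (ram_0 .. ram_31).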
reg ram_0_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_0_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_0_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_0_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_0_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_0_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_0_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_0_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_0_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_0_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_0_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_0_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_1_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_1_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_1_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_1_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_1_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_1_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_1_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_1_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_1_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_1_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_1_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_1_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_2_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_2_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_2_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_2_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_2_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_2_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_2_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_2_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_2_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_2_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_2_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_2_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_3_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_3_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_3_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_3_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_3_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_3_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_3_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_3_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_3_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_3_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_3_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_3_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_4_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_4_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_4_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_4_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_4_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_4_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_4_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_4_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_4_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_4_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_4_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_4_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_5_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_5_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_5_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_5_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_5_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_5_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_5_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_5_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_5_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_5_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_5_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_5_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_6_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_6_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_6_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_6_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_6_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_6_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_6_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_6_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_6_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_6_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_6_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_6_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_7_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_7_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_7_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_7_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_7_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_7_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_7_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_7_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_7_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_7_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_7_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_7_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_8_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_8_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_8_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_8_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_8_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_8_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_8_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_8_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_8_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_8_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_8_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_8_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_9_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_9_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_9_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_9_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_9_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_9_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_9_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_9_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_9_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_9_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_9_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_9_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_10_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_10_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_10_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_10_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_10_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_10_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_10_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_10_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_10_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_10_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_10_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_10_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_11_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_11_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_11_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_11_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_11_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_11_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_11_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_11_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_11_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_11_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_11_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_11_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_12_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_12_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_12_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_12_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_12_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_12_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_12_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_12_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_12_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_12_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_12_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_12_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_13_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_13_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_13_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_13_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_13_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_13_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_13_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_13_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_13_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_13_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_13_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_13_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_14_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_14_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_14_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_14_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_14_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_14_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_14_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_14_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_14_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_14_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_14_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_14_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_15_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_15_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_15_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_15_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_15_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_15_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_15_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_15_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_15_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_15_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_15_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_15_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_16_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_16_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_16_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_16_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_16_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_16_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_16_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_16_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_16_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_16_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_16_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_16_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_17_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_17_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_17_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_17_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_17_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_17_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_17_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_17_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_17_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_17_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_17_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_17_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_18_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_18_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_18_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_18_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_18_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_18_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_18_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_18_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_18_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_18_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_18_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_18_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_19_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_19_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_19_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_19_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_19_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_19_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_19_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_19_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_19_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_19_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_19_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_19_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_20_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_20_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_20_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_20_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_20_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_20_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_20_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_20_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_20_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_20_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_20_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_20_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_21_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_21_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_21_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_21_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_21_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_21_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_21_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_21_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_21_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_21_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_21_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_21_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_22_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_22_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_22_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_22_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_22_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_22_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_22_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_22_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_22_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_22_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_22_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_22_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_23_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_23_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_23_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_23_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_23_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_23_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_23_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_23_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_23_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_23_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_23_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_23_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_24_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_24_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_24_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_24_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_24_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_24_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_24_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_24_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_24_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_24_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_24_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_24_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_25_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_25_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_25_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_25_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_25_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_25_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_25_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_25_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_25_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_25_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_25_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_25_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_26_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_26_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_26_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_26_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_26_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_26_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_26_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_26_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_26_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_26_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_26_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_26_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_27_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_27_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_27_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_27_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_27_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_27_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_27_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_27_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_27_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_27_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_27_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_27_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_28_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_28_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_28_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_28_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_28_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_28_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_28_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_28_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_28_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_28_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_28_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_28_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_29_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_29_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_29_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_29_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_29_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_29_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_29_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_29_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_29_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_29_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_29_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_29_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_30_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_30_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_30_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_30_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_30_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_30_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_30_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_30_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_30_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_30_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_30_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_30_start_bank; // @[fetch-target-queue.scala:130:21]
reg ram_31_cfi_idx_valid; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_31_cfi_idx_bits; // @[fetch-target-queue.scala:130:21]
reg ram_31_cfi_taken; // @[fetch-target-queue.scala:130:21]
reg ram_31_cfi_mispredicted; // @[fetch-target-queue.scala:130:21]
reg [2:0] ram_31_cfi_type; // @[fetch-target-queue.scala:130:21]
reg [7:0] ram_31_br_mask; // @[fetch-target-queue.scala:130:21]
reg ram_31_cfi_is_call; // @[fetch-target-queue.scala:130:21]
reg ram_31_cfi_is_ret; // @[fetch-target-queue.scala:130:21]
reg ram_31_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21]
reg [39:0] ram_31_ras_top; // @[fetch-target-queue.scala:130:21]
reg [4:0] ram_31_ras_idx; // @[fetch-target-queue.scala:130:21]
reg ram_31_start_bank; // @[fetch-target-queue.scala:130:21]
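  // Speculative global history for the entry being enqueued (`new_ghist`). _GEN_0 below
  // appears to be the packed write data for the ghist buffers, and the `_ghist_*_R0_data`
  // reads are unpacked into the bpd-update and rrd_ftq_resps ghist outputs.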
wire [63:0] new_ghist_old_history; // @[fetch-target-queue.scala:165:24]
wire new_ghist_new_saw_branch_not_taken; // @[fetch-target-queue.scala:165:24]
wire new_ghist_new_saw_branch_taken; // @[fetch-target-queue.scala:165:24]
wire [4:0] new_ghist_ras_idx; // @[fetch-target-queue.scala:165:24]
wire [71:0] _GEN_0 = {new_ghist_ras_idx, new_ghist_new_saw_branch_taken, new_ghist_new_saw_branch_not_taken, new_ghist_current_saw_branch_not_taken, new_ghist_old_history}; // @[fetch-target-queue.scala:131:43, :165:24]
assign io_bpdupdate_bits_ghist_old_history_0 = _ghist_0_R0_data[63:0]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_bpdupdate_bits_ghist_current_saw_branch_not_taken_0 = _ghist_0_R0_data[64]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_bpdupdate_bits_ghist_new_saw_branch_not_taken_0 = _ghist_0_R0_data[65]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_bpdupdate_bits_ghist_new_saw_branch_taken_0 = _ghist_0_R0_data[66]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_bpdupdate_bits_ghist_ras_idx_0 = _ghist_0_R0_data[71:67]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_rrd_ftq_resps_0_ghist_old_history_0 = _ghist_1_R0_data[63:0]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_rrd_ftq_resps_0_ghist_current_saw_branch_not_taken_0 = _ghist_1_R0_data[64]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_rrd_ftq_resps_0_ghist_new_saw_branch_not_taken_0 = _ghist_1_R0_data[65]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_rrd_ftq_resps_0_ghist_new_saw_branch_taken_0 = _ghist_1_R0_data[66]; // @[fetch-target-queue.scala:82:7, :131:43]
assign io_rrd_ftq_resps_0_ghist_ras_idx_0 = _ghist_1_R0_data[71:67]; // @[fetch-target-queue.scala:82:7, :131:43]
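  // Enqueue handshake: do_enq (and its _is_enq_T_* aliases) fires when io_enq.ready && io_enq.valid.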
wire _GEN_1 = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire do_enq; // @[Decoupled.scala:51:35]
assign do_enq = _GEN_1; // @[Decoupled.scala:51:35]
wire _is_enq_T_1; // @[Decoupled.scala:51:35]
assign _is_enq_T_1 = _GEN_1; // @[Decoupled.scala:51:35]
wire _is_enq_T_3; // @[Decoupled.scala:51:35]
assign _is_enq_T_3 = _GEN_1; // @[Decoupled.scala:51:35]
wire _is_enq_T_5; // @[Decoupled.scala:51:35]
assign _is_enq_T_5 = _GEN_1; // @[Decoupled.scala:51:35]
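  // State of the previously enqueued fetch packet (its ghist, FTQ entry fields, and PC),
  // used to compute the global history for the next enqueue.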
reg [63:0] prev_ghist_old_history; // @[fetch-target-queue.scala:142:27]
reg prev_ghist_current_saw_branch_not_taken; // @[fetch-target-queue.scala:142:27]
reg prev_ghist_new_saw_branch_not_taken; // @[fetch-target-queue.scala:142:27]
reg prev_ghist_new_saw_branch_taken; // @[fetch-target-queue.scala:142:27]
reg [4:0] prev_ghist_ras_idx; // @[fetch-target-queue.scala:142:27]
reg prev_entry_cfi_idx_valid; // @[fetch-target-queue.scala:143:27]
reg [2:0] prev_entry_cfi_idx_bits; // @[fetch-target-queue.scala:143:27]
wire [2:0] new_ghist_cfi_idx_fixed = prev_entry_cfi_idx_bits; // @[frontend.scala:72:32]
reg prev_entry_cfi_taken; // @[fetch-target-queue.scala:143:27]
reg prev_entry_cfi_mispredicted; // @[fetch-target-queue.scala:143:27]
reg [2:0] prev_entry_cfi_type; // @[fetch-target-queue.scala:143:27]
reg [7:0] prev_entry_br_mask; // @[fetch-target-queue.scala:143:27]
reg prev_entry_cfi_is_call; // @[fetch-target-queue.scala:143:27]
reg prev_entry_cfi_is_ret; // @[fetch-target-queue.scala:143:27]
reg prev_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:143:27]
reg [39:0] prev_entry_ras_top; // @[fetch-target-queue.scala:143:27]
reg [4:0] prev_entry_ras_idx; // @[fetch-target-queue.scala:143:27]
reg prev_entry_start_bank; // @[fetch-target-queue.scala:143:27]
reg [39:0] prev_pc; // @[fetch-target-queue.scala:144:27]
wire [7:0] _new_entry_br_mask_T; // @[fetch-target-queue.scala:162:52]
wire _new_entry_start_bank_T; // @[frontend.scala:137:47]
wire [7:0] new_entry_br_mask; // @[fetch-target-queue.scala:149:25]
wire new_entry_start_bank; // @[fetch-target-queue.scala:149:25]
assign _new_entry_br_mask_T = io_enq_bits_br_mask_0 & io_enq_bits_mask_0; // @[fetch-target-queue.scala:82:7, :162:52]
assign new_entry_br_mask = _new_entry_br_mask_T; // @[fetch-target-queue.scala:149:25, :162:52]
assign _new_entry_start_bank_T = io_enq_bits_pc_0[3]; // @[frontend.scala:137:47]
assign new_entry_start_bank = _new_entry_start_bank_T; // @[frontend.scala:137:47]
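  // new_entry: the incoming entry's branch mask is restricted to the fetch mask, and
  // start_bank is taken from bit 3 of the fetch PC. The chain that follows is the
  // global-history update from frontend.scala (likely GlobalHistory.update): it collects
  // the previous packet's not-taken branches, shifts the old history per bank (handling
  // the ignore-second-bank case), and bumps the RAS index up on a call, down on a return.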
wire [7:0] _new_ghist_T = prev_entry_br_mask >> prev_entry_cfi_idx_bits; // @[fetch-target-queue.scala:143:27, :170:27]
wire _new_ghist_T_1 = _new_ghist_T[0]; // @[fetch-target-queue.scala:170:27]
wire [7:0] new_ghist_cfi_idx_oh = 8'h1 << new_ghist_cfi_idx_fixed; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T = new_ghist_cfi_idx_oh; // @[OneHot.scala:58:35]
wire [4:0] _new_ghist_new_history_ras_idx_T_9; // @[frontend.scala:110:31]
wire [63:0] new_ghist_new_history_old_history; // @[frontend.scala:74:27]
wire new_ghist_new_history_new_saw_branch_not_taken; // @[frontend.scala:74:27]
wire new_ghist_new_history_new_saw_branch_taken; // @[frontend.scala:74:27]
wire [4:0] new_ghist_new_history_ras_idx; // @[frontend.scala:74:27]
wire [7:0] _new_ghist_not_taken_branches_T_1 = {1'h0, new_ghist_cfi_idx_oh[7:1]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_2 = {2'h0, new_ghist_cfi_idx_oh[7:2]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_3 = {3'h0, new_ghist_cfi_idx_oh[7:3]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_4 = {4'h0, new_ghist_cfi_idx_oh[7:4]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_5 = {5'h0, new_ghist_cfi_idx_oh[7:5]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_6 = {6'h0, new_ghist_cfi_idx_oh[7:6]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_7 = {7'h0, new_ghist_cfi_idx_oh[7]}; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_8 = _new_ghist_not_taken_branches_T | _new_ghist_not_taken_branches_T_1; // @[util.scala:383:{29,45}]
wire [7:0] _new_ghist_not_taken_branches_T_9 = _new_ghist_not_taken_branches_T_8 | _new_ghist_not_taken_branches_T_2; // @[util.scala:383:{29,45}]
wire [7:0] _new_ghist_not_taken_branches_T_10 = _new_ghist_not_taken_branches_T_9 | _new_ghist_not_taken_branches_T_3; // @[util.scala:383:{29,45}]
wire [7:0] _new_ghist_not_taken_branches_T_11 = _new_ghist_not_taken_branches_T_10 | _new_ghist_not_taken_branches_T_4; // @[util.scala:383:{29,45}]
wire [7:0] _new_ghist_not_taken_branches_T_12 = _new_ghist_not_taken_branches_T_11 | _new_ghist_not_taken_branches_T_5; // @[util.scala:383:{29,45}]
wire [7:0] _new_ghist_not_taken_branches_T_13 = _new_ghist_not_taken_branches_T_12 | _new_ghist_not_taken_branches_T_6; // @[util.scala:383:{29,45}]
wire [7:0] _new_ghist_not_taken_branches_T_14 = _new_ghist_not_taken_branches_T_13 | _new_ghist_not_taken_branches_T_7; // @[util.scala:383:{29,45}]
wire _new_ghist_not_taken_branches_T_15 = _new_ghist_T_1 & prev_entry_cfi_taken; // @[frontend.scala:77:84]
wire [7:0] _new_ghist_not_taken_branches_T_16 = _new_ghist_not_taken_branches_T_15 ? new_ghist_cfi_idx_oh : 8'h0; // @[OneHot.scala:58:35]
wire [7:0] _new_ghist_not_taken_branches_T_17 = ~_new_ghist_not_taken_branches_T_16; // @[frontend.scala:77:{69,73}]
wire [7:0] _new_ghist_not_taken_branches_T_18 = _new_ghist_not_taken_branches_T_14 & _new_ghist_not_taken_branches_T_17; // @[util.scala:383:45]
wire [7:0] _new_ghist_not_taken_branches_T_20 = prev_entry_cfi_idx_valid ? _new_ghist_not_taken_branches_T_18 : 8'hFF; // @[frontend.scala:76:44, :77:67]
wire [7:0] new_ghist_not_taken_branches = prev_entry_br_mask & _new_ghist_not_taken_branches_T_20; // @[frontend.scala:76:{39,44}]
wire [64:0] _GEN_2 = {prev_ghist_old_history, 1'h0}; // @[frontend.scala:53:75]
wire [64:0] _new_ghist_base_T; // @[frontend.scala:53:75]
assign _new_ghist_base_T = _GEN_2; // @[frontend.scala:53:75]
wire [64:0] _new_ghist_base_T_2; // @[frontend.scala:54:75]
assign _new_ghist_base_T_2 = _GEN_2; // @[frontend.scala:53:75, :54:75]
wire [64:0] _new_ghist_new_history_old_history_T; // @[frontend.scala:53:75]
assign _new_ghist_new_history_old_history_T = _GEN_2; // @[frontend.scala:53:75]
wire [64:0] _new_ghist_new_history_old_history_T_2; // @[frontend.scala:54:75]
assign _new_ghist_new_history_old_history_T_2 = _GEN_2; // @[frontend.scala:53:75, :54:75]
wire [64:0] _new_ghist_new_history_old_history_T_6; // @[frontend.scala:53:75]
assign _new_ghist_new_history_old_history_T_6 = _GEN_2; // @[frontend.scala:53:75]
wire [64:0] _new_ghist_new_history_old_history_T_8; // @[frontend.scala:54:75]
assign _new_ghist_new_history_old_history_T_8 = _GEN_2; // @[frontend.scala:53:75, :54:75]
wire [64:0] _new_ghist_new_history_old_history_T_13; // @[frontend.scala:53:75]
assign _new_ghist_new_history_old_history_T_13 = _GEN_2; // @[frontend.scala:53:75]
wire [64:0] _new_ghist_new_history_old_history_T_15; // @[frontend.scala:54:75]
assign _new_ghist_new_history_old_history_T_15 = _GEN_2; // @[frontend.scala:53:75, :54:75]
wire [64:0] _new_ghist_new_history_old_history_T_19; // @[frontend.scala:53:75]
assign _new_ghist_new_history_old_history_T_19 = _GEN_2; // @[frontend.scala:53:75]
wire [64:0] _new_ghist_new_history_old_history_T_21; // @[frontend.scala:54:75]
assign _new_ghist_new_history_old_history_T_21 = _GEN_2; // @[frontend.scala:53:75, :54:75]
wire [64:0] _new_ghist_base_T_1 = {_new_ghist_base_T[64:1], 1'h1}; // @[frontend.scala:53:{75,80}]
wire [64:0] _GEN_3 = {1'h0, prev_ghist_old_history}; // @[frontend.scala:54:12]
wire [64:0] _new_ghist_base_T_3 = prev_ghist_new_saw_branch_not_taken ? _new_ghist_base_T_2 : _GEN_3; // @[frontend.scala:54:{12,75}]
wire [64:0] new_ghist_base = prev_ghist_new_saw_branch_taken ? _new_ghist_base_T_1 : _new_ghist_base_T_3; // @[frontend.scala:53:{12,80}, :54:12]
wire _GEN_4 = prev_entry_cfi_idx_valid & prev_entry_cfi_taken; // @[frontend.scala:91:37]
wire _new_ghist_cfi_in_bank_0_T; // @[frontend.scala:91:37]
assign _new_ghist_cfi_in_bank_0_T = _GEN_4; // @[frontend.scala:91:37]
wire _new_ghist_new_history_new_saw_branch_taken_T_1; // @[frontend.scala:106:59]
assign _new_ghist_new_history_new_saw_branch_taken_T_1 = _GEN_4; // @[frontend.scala:91:37, :106:59]
wire _new_ghist_cfi_in_bank_0_T_1 = ~(new_ghist_cfi_idx_fixed[2]); // @[frontend.scala:72:32, :91:67]
wire new_ghist_cfi_in_bank_0 = _new_ghist_cfi_in_bank_0_T & _new_ghist_cfi_in_bank_0_T_1; // @[frontend.scala:91:{37,50,67}]
wire [2:0] _new_ghist_ignore_second_bank_T = prev_pc[5:3]; // @[frontend.scala:139:28]
wire _new_ghist_ignore_second_bank_T_1 = &_new_ghist_ignore_second_bank_T; // @[frontend.scala:139:{28,66}]
wire _new_ghist_ignore_second_bank_T_2 = _new_ghist_ignore_second_bank_T_1; // @[frontend.scala:139:{21,66}]
wire new_ghist_ignore_second_bank = new_ghist_cfi_in_bank_0 | _new_ghist_ignore_second_bank_T_2; // @[frontend.scala:91:50, :92:46, :139:21]
wire [3:0] _new_ghist_first_bank_saw_not_taken_T = new_ghist_not_taken_branches[3:0]; // @[frontend.scala:76:39, :94:56]
wire _new_ghist_first_bank_saw_not_taken_T_1 = |_new_ghist_first_bank_saw_not_taken_T; // @[frontend.scala:94:{56,72}]
wire new_ghist_first_bank_saw_not_taken = _new_ghist_first_bank_saw_not_taken_T_1 | prev_ghist_current_saw_branch_not_taken; // @[frontend.scala:94:{72,80}]
wire [64:0] _new_ghist_new_history_old_history_T_1 = {_new_ghist_new_history_old_history_T[64:1], 1'h1}; // @[frontend.scala:53:{75,80}]
wire [64:0] _new_ghist_new_history_old_history_T_3 = prev_ghist_new_saw_branch_not_taken ? _new_ghist_new_history_old_history_T_2 : _GEN_3; // @[frontend.scala:54:{12,75}]
wire [64:0] _new_ghist_new_history_old_history_T_4 = prev_ghist_new_saw_branch_taken ? _new_ghist_new_history_old_history_T_1 : _new_ghist_new_history_old_history_T_3; // @[frontend.scala:53:{12,80}, :54:12]
wire _GEN_5 = _new_ghist_T_1 & new_ghist_cfi_in_bank_0; // @[frontend.scala:91:50, :99:59]
wire _new_ghist_new_history_new_saw_branch_taken_T; // @[frontend.scala:99:59]
assign _new_ghist_new_history_new_saw_branch_taken_T = _GEN_5; // @[frontend.scala:99:59]
wire _new_ghist_new_history_old_history_T_5; // @[frontend.scala:101:50]
assign _new_ghist_new_history_old_history_T_5 = _GEN_5; // @[frontend.scala:99:59, :101:50]
wire [64:0] _new_ghist_new_history_old_history_T_7 = {_new_ghist_new_history_old_history_T_6[64:1], 1'h1}; // @[frontend.scala:53:{75,80}]
wire [64:0] _new_ghist_new_history_old_history_T_9 = prev_ghist_new_saw_branch_not_taken ? _new_ghist_new_history_old_history_T_8 : _GEN_3; // @[frontend.scala:54:{12,75}]
wire [64:0] _new_ghist_new_history_old_history_T_10 = prev_ghist_new_saw_branch_taken ? _new_ghist_new_history_old_history_T_7 : _new_ghist_new_history_old_history_T_9; // @[frontend.scala:53:{12,80}, :54:12]
wire [65:0] _new_ghist_new_history_old_history_T_11 = {_new_ghist_new_history_old_history_T_10, 1'h0}; // @[frontend.scala:53:12, :101:110]
wire [65:0] _new_ghist_new_history_old_history_T_12 = {_new_ghist_new_history_old_history_T_11[65:1], 1'h1}; // @[frontend.scala:101:{110,115}]
wire [64:0] _new_ghist_new_history_old_history_T_14 = {_new_ghist_new_history_old_history_T_13[64:1], 1'h1}; // @[frontend.scala:53:{75,80}]
wire [64:0] _new_ghist_new_history_old_history_T_16 = prev_ghist_new_saw_branch_not_taken ? _new_ghist_new_history_old_history_T_15 : _GEN_3; // @[frontend.scala:54:{12,75}]
wire [64:0] _new_ghist_new_history_old_history_T_17 = prev_ghist_new_saw_branch_taken ? _new_ghist_new_history_old_history_T_14 : _new_ghist_new_history_old_history_T_16; // @[frontend.scala:53:{12,80}, :54:12]
wire [65:0] _new_ghist_new_history_old_history_T_18 = {_new_ghist_new_history_old_history_T_17, 1'h0}; // @[frontend.scala:53:12, :102:110]
wire [64:0] _new_ghist_new_history_old_history_T_20 = {_new_ghist_new_history_old_history_T_19[64:1], 1'h1}; // @[frontend.scala:53:{75,80}]
wire [64:0] _new_ghist_new_history_old_history_T_22 = prev_ghist_new_saw_branch_not_taken ? _new_ghist_new_history_old_history_T_21 : _GEN_3; // @[frontend.scala:54:{12,75}]
wire [64:0] _new_ghist_new_history_old_history_T_23 = prev_ghist_new_saw_branch_taken ? _new_ghist_new_history_old_history_T_20 : _new_ghist_new_history_old_history_T_22; // @[frontend.scala:53:{12,80}, :54:12]
wire [65:0] _new_ghist_new_history_old_history_T_24 = new_ghist_first_bank_saw_not_taken ? _new_ghist_new_history_old_history_T_18 : {1'h0, _new_ghist_new_history_old_history_T_23}; // @[frontend.scala:53:12, :94:80, :102:{39,110}]
wire [65:0] _new_ghist_new_history_old_history_T_25 = _new_ghist_new_history_old_history_T_5 ? _new_ghist_new_history_old_history_T_12 : _new_ghist_new_history_old_history_T_24; // @[frontend.scala:101:{39,50,115}, :102:39]
assign new_ghist_new_history_old_history = new_ghist_ignore_second_bank ? _new_ghist_new_history_old_history_T_4[63:0] : _new_ghist_new_history_old_history_T_25[63:0]; // @[frontend.scala:53:12, :74:27, :92:46, :96:33, :97:33, :101:{33,39}]
wire [3:0] _new_ghist_new_history_new_saw_branch_not_taken_T = new_ghist_not_taken_branches[7:4]; // @[frontend.scala:76:39, :105:67]
wire _new_ghist_new_history_new_saw_branch_not_taken_T_1 = |_new_ghist_new_history_new_saw_branch_not_taken_T; // @[frontend.scala:105:{67,92}]
assign new_ghist_new_history_new_saw_branch_not_taken = new_ghist_ignore_second_bank ? new_ghist_first_bank_saw_not_taken : _new_ghist_new_history_new_saw_branch_not_taken_T_1; // @[frontend.scala:74:27, :92:46, :94:80, :96:33, :98:46, :105:{46,92}]
wire _new_ghist_new_history_new_saw_branch_taken_T_2 = _new_ghist_new_history_new_saw_branch_taken_T_1 & _new_ghist_T_1; // @[frontend.scala:106:{59,72}]
wire _new_ghist_new_history_new_saw_branch_taken_T_3 = ~new_ghist_cfi_in_bank_0; // @[frontend.scala:91:50, :106:88]
wire _new_ghist_new_history_new_saw_branch_taken_T_4 = _new_ghist_new_history_new_saw_branch_taken_T_2 & _new_ghist_new_history_new_saw_branch_taken_T_3; // @[frontend.scala:106:{72,85,88}]
assign new_ghist_new_history_new_saw_branch_taken = new_ghist_ignore_second_bank ? _new_ghist_new_history_new_saw_branch_taken_T : _new_ghist_new_history_new_saw_branch_taken_T_4; // @[frontend.scala:74:27, :92:46, :96:33, :99:{46,59}, :106:{46,85}]
wire _new_ghist_new_history_ras_idx_T = prev_entry_cfi_idx_valid & prev_entry_cfi_is_call; // @[frontend.scala:110:42]
wire [5:0] _GEN_6 = {1'h0, prev_ghist_ras_idx}; // @[util.scala:211:14]
wire [5:0] _new_ghist_new_history_ras_idx_T_1 = _GEN_6 + 6'h1; // @[util.scala:211:14]
wire [4:0] _new_ghist_new_history_ras_idx_T_2 = _new_ghist_new_history_ras_idx_T_1[4:0]; // @[util.scala:211:14]
wire [4:0] _new_ghist_new_history_ras_idx_T_3 = _new_ghist_new_history_ras_idx_T_2; // @[util.scala:211:{14,20}]
wire _new_ghist_new_history_ras_idx_T_4 = prev_entry_cfi_idx_valid & prev_entry_cfi_is_ret; // @[frontend.scala:111:42]
wire [5:0] _new_ghist_new_history_ras_idx_T_5 = _GEN_6 - 6'h1; // @[util.scala:211:14, :228:14]
wire [4:0] _new_ghist_new_history_ras_idx_T_6 = _new_ghist_new_history_ras_idx_T_5[4:0]; // @[util.scala:228:14]
wire [4:0] _new_ghist_new_history_ras_idx_T_7 = _new_ghist_new_history_ras_idx_T_6; // @[util.scala:228:{14,20}]
wire [4:0] _new_ghist_new_history_ras_idx_T_8 = _new_ghist_new_history_ras_idx_T_4 ? _new_ghist_new_history_ras_idx_T_7 : prev_ghist_ras_idx; // @[util.scala:228:20]
assign _new_ghist_new_history_ras_idx_T_9 = _new_ghist_new_history_ras_idx_T ? _new_ghist_new_history_ras_idx_T_3 : _new_ghist_new_history_ras_idx_T_8; // @[util.scala:211:20]
assign new_ghist_new_history_ras_idx = _new_ghist_new_history_ras_idx_T_9; // @[frontend.scala:74:27, :110:31]
assign new_ghist_old_history = io_enq_bits_ghist_current_saw_branch_not_taken_0 ? io_enq_bits_ghist_old_history_0 : new_ghist_new_history_old_history; // @[frontend.scala:74:27]
assign new_ghist_new_saw_branch_not_taken = io_enq_bits_ghist_current_saw_branch_not_taken_0 ? io_enq_bits_ghist_new_saw_branch_not_taken_0 : new_ghist_new_history_new_saw_branch_not_taken; // @[frontend.scala:74:27]
assign new_ghist_new_saw_branch_taken = io_enq_bits_ghist_current_saw_branch_not_taken_0 ? io_enq_bits_ghist_new_saw_branch_taken_0 : new_ghist_new_history_new_saw_branch_taken; // @[frontend.scala:74:27]
assign new_ghist_ras_idx = io_enq_bits_ghist_current_saw_branch_not_taken_0 ? io_enq_bits_ghist_ras_idx_0 : new_ghist_new_history_ras_idx; // @[frontend.scala:74:27]
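  // On enqueue, enq_ptr is wrap-incremented; the index used for io_com_pc selects
  // io_deq.bits when a dequeue is requested, otherwise deq_ptr.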
wire [4:0] _enq_ptr_T_1 = _enq_ptr_T[4:0]; // @[util.scala:211:14]
wire [4:0] _enq_ptr_T_2 = _enq_ptr_T_1; // @[util.scala:211:{14,20}]
wire [4:0] _io_com_pc_T = io_deq_valid_0 ? io_deq_bits_0 : deq_ptr; // @[fetch-target-queue.scala:82:7, :121:27, :196:23, :197:13, :346:31]
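  // RAS restore interface: the update valid/pc/idx are registered for one cycle before
  // being driven on io_ras_update*.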
reg first_empty; // @[fetch-target-queue.scala:201:28]
wire [39:0] ras_update_pc; // @[fetch-target-queue.scala:207:31]
wire [4:0] ras_update_idx; // @[fetch-target-queue.scala:208:32]
reg io_ras_update_REG; // @[fetch-target-queue.scala:209:31]
assign io_ras_update_0 = io_ras_update_REG; // @[fetch-target-queue.scala:82:7, :209:31]
reg [39:0] io_ras_update_pc_REG; // @[fetch-target-queue.scala:210:31]
assign io_ras_update_pc_0 = io_ras_update_pc_REG; // @[fetch-target-queue.scala:82:7, :210:31]
reg [4:0] io_ras_update_idx_REG; // @[fetch-target-queue.scala:211:31]
assign io_ras_update_idx_0 = io_ras_update_idx_REG; // @[fetch-target-queue.scala:82:7, :211:31]
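  // Branch-predictor update sequencing: bpd_update_mispredict / bpd_update_repair track
  // whether the FTQ is replaying entries after a misprediction, with bpd_repair_idx /
  // bpd_end_idx / bpd_repair_pc bounding that walk. bpd_idx selects the entry to read:
  // the redirect index on a redirect, else the repair index during a replay, else bpd_ptr.
  // The bpd_entry_* registers below capture that entry's fields for io_bpdupdate.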
reg bpd_update_mispredict; // @[fetch-target-queue.scala:213:38]
reg bpd_update_repair; // @[fetch-target-queue.scala:214:34]
reg [4:0] bpd_repair_idx; // @[fetch-target-queue.scala:215:27]
reg [4:0] bpd_end_idx; // @[fetch-target-queue.scala:216:24]
reg [39:0] bpd_repair_pc; // @[fetch-target-queue.scala:217:26]
wire _bpd_idx_T = bpd_update_repair | bpd_update_mispredict; // @[fetch-target-queue.scala:213:38, :214:34, :220:27]
wire [4:0] _bpd_idx_T_1 = _bpd_idx_T ? bpd_repair_idx : bpd_ptr; // @[fetch-target-queue.scala:120:27, :215:27, :220:{8,27}]
wire [4:0] bpd_idx = io_redirect_valid_0 ? io_redirect_bits_0 : _bpd_idx_T_1; // @[fetch-target-queue.scala:82:7, :219:20, :220:8]
wire [4:0] _bpd_ghist_WIRE = bpd_idx; // @[fetch-target-queue.scala:219:20, :222:32]
wire [4:0] _bpd_meta_WIRE = bpd_idx; // @[fetch-target-queue.scala:219:20, :228:28]
reg bpd_entry_cfi_idx_valid; // @[fetch-target-queue.scala:221:26]
assign io_bpdupdate_bits_cfi_idx_valid_0 = bpd_entry_cfi_idx_valid; // @[fetch-target-queue.scala:82:7, :221:26]
reg [2:0] bpd_entry_cfi_idx_bits; // @[fetch-target-queue.scala:221:26]
assign io_bpdupdate_bits_cfi_idx_bits_0 = bpd_entry_cfi_idx_bits; // @[fetch-target-queue.scala:82:7, :221:26]
reg bpd_entry_cfi_taken; // @[fetch-target-queue.scala:221:26]
assign io_bpdupdate_bits_cfi_taken_0 = bpd_entry_cfi_taken; // @[fetch-target-queue.scala:82:7, :221:26]
reg bpd_entry_cfi_mispredicted; // @[fetch-target-queue.scala:221:26]
assign io_bpdupdate_bits_cfi_mispredicted_0 = bpd_entry_cfi_mispredicted; // @[fetch-target-queue.scala:82:7, :221:26]
reg [2:0] bpd_entry_cfi_type; // @[fetch-target-queue.scala:221:26]
reg [7:0] bpd_entry_br_mask; // @[fetch-target-queue.scala:221:26]
reg bpd_entry_cfi_is_call; // @[fetch-target-queue.scala:221:26]
reg bpd_entry_cfi_is_ret; // @[fetch-target-queue.scala:221:26]
reg bpd_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:221:26]
reg [39:0] bpd_entry_ras_top; // @[fetch-target-queue.scala:221:26]
reg [4:0] bpd_entry_ras_idx; // @[fetch-target-queue.scala:221:26]
reg bpd_entry_start_bank; // @[fetch-target-queue.scala:221:26]
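  // The 32 entry registers are packed into indexable vectors (one per field) so the
  // entry at bpd_idx can be muxed out.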
wire [31:0] _GEN_7 = {{ram_31_cfi_idx_valid}, {ram_30_cfi_idx_valid}, {ram_29_cfi_idx_valid}, {ram_28_cfi_idx_valid}, {ram_27_cfi_idx_valid}, {ram_26_cfi_idx_valid}, {ram_25_cfi_idx_valid}, {ram_24_cfi_idx_valid}, {ram_23_cfi_idx_valid}, {ram_22_cfi_idx_valid}, {ram_21_cfi_idx_valid}, {ram_20_cfi_idx_valid}, {ram_19_cfi_idx_valid}, {ram_18_cfi_idx_valid}, {ram_17_cfi_idx_valid}, {ram_16_cfi_idx_valid}, {ram_15_cfi_idx_valid}, {ram_14_cfi_idx_valid}, {ram_13_cfi_idx_valid}, {ram_12_cfi_idx_valid}, {ram_11_cfi_idx_valid}, {ram_10_cfi_idx_valid}, {ram_9_cfi_idx_valid}, {ram_8_cfi_idx_valid}, {ram_7_cfi_idx_valid}, {ram_6_cfi_idx_valid}, {ram_5_cfi_idx_valid}, {ram_4_cfi_idx_valid}, {ram_3_cfi_idx_valid}, {ram_2_cfi_idx_valid}, {ram_1_cfi_idx_valid}, {ram_0_cfi_idx_valid}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0][2:0] _GEN_8 = {{ram_31_cfi_idx_bits}, {ram_30_cfi_idx_bits}, {ram_29_cfi_idx_bits}, {ram_28_cfi_idx_bits}, {ram_27_cfi_idx_bits}, {ram_26_cfi_idx_bits}, {ram_25_cfi_idx_bits}, {ram_24_cfi_idx_bits}, {ram_23_cfi_idx_bits}, {ram_22_cfi_idx_bits}, {ram_21_cfi_idx_bits}, {ram_20_cfi_idx_bits}, {ram_19_cfi_idx_bits}, {ram_18_cfi_idx_bits}, {ram_17_cfi_idx_bits}, {ram_16_cfi_idx_bits}, {ram_15_cfi_idx_bits}, {ram_14_cfi_idx_bits}, {ram_13_cfi_idx_bits}, {ram_12_cfi_idx_bits}, {ram_11_cfi_idx_bits}, {ram_10_cfi_idx_bits}, {ram_9_cfi_idx_bits}, {ram_8_cfi_idx_bits}, {ram_7_cfi_idx_bits}, {ram_6_cfi_idx_bits}, {ram_5_cfi_idx_bits}, {ram_4_cfi_idx_bits}, {ram_3_cfi_idx_bits}, {ram_2_cfi_idx_bits}, {ram_1_cfi_idx_bits}, {ram_0_cfi_idx_bits}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0] _GEN_9 = {{ram_31_cfi_taken}, {ram_30_cfi_taken}, {ram_29_cfi_taken}, {ram_28_cfi_taken}, {ram_27_cfi_taken}, {ram_26_cfi_taken}, {ram_25_cfi_taken}, {ram_24_cfi_taken}, {ram_23_cfi_taken}, {ram_22_cfi_taken}, {ram_21_cfi_taken}, {ram_20_cfi_taken}, {ram_19_cfi_taken}, {ram_18_cfi_taken}, {ram_17_cfi_taken}, {ram_16_cfi_taken}, {ram_15_cfi_taken}, {ram_14_cfi_taken}, {ram_13_cfi_taken}, {ram_12_cfi_taken}, {ram_11_cfi_taken}, {ram_10_cfi_taken}, {ram_9_cfi_taken}, {ram_8_cfi_taken}, {ram_7_cfi_taken}, {ram_6_cfi_taken}, {ram_5_cfi_taken}, {ram_4_cfi_taken}, {ram_3_cfi_taken}, {ram_2_cfi_taken}, {ram_1_cfi_taken}, {ram_0_cfi_taken}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0] _GEN_10 = {{ram_31_cfi_mispredicted}, {ram_30_cfi_mispredicted}, {ram_29_cfi_mispredicted}, {ram_28_cfi_mispredicted}, {ram_27_cfi_mispredicted}, {ram_26_cfi_mispredicted}, {ram_25_cfi_mispredicted}, {ram_24_cfi_mispredicted}, {ram_23_cfi_mispredicted}, {ram_22_cfi_mispredicted}, {ram_21_cfi_mispredicted}, {ram_20_cfi_mispredicted}, {ram_19_cfi_mispredicted}, {ram_18_cfi_mispredicted}, {ram_17_cfi_mispredicted}, {ram_16_cfi_mispredicted}, {ram_15_cfi_mispredicted}, {ram_14_cfi_mispredicted}, {ram_13_cfi_mispredicted}, {ram_12_cfi_mispredicted}, {ram_11_cfi_mispredicted}, {ram_10_cfi_mispredicted}, {ram_9_cfi_mispredicted}, {ram_8_cfi_mispredicted}, {ram_7_cfi_mispredicted}, {ram_6_cfi_mispredicted}, {ram_5_cfi_mispredicted}, {ram_4_cfi_mispredicted}, {ram_3_cfi_mispredicted}, {ram_2_cfi_mispredicted}, {ram_1_cfi_mispredicted}, {ram_0_cfi_mispredicted}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0][2:0] _GEN_11 = {{ram_31_cfi_type}, {ram_30_cfi_type}, {ram_29_cfi_type}, {ram_28_cfi_type}, {ram_27_cfi_type}, {ram_26_cfi_type}, {ram_25_cfi_type}, {ram_24_cfi_type}, {ram_23_cfi_type}, {ram_22_cfi_type}, {ram_21_cfi_type}, {ram_20_cfi_type}, {ram_19_cfi_type}, {ram_18_cfi_type}, {ram_17_cfi_type}, {ram_16_cfi_type}, {ram_15_cfi_type}, {ram_14_cfi_type}, {ram_13_cfi_type}, {ram_12_cfi_type}, {ram_11_cfi_type}, {ram_10_cfi_type}, {ram_9_cfi_type}, {ram_8_cfi_type}, {ram_7_cfi_type}, {ram_6_cfi_type}, {ram_5_cfi_type}, {ram_4_cfi_type}, {ram_3_cfi_type}, {ram_2_cfi_type}, {ram_1_cfi_type}, {ram_0_cfi_type}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0][7:0] _GEN_12 = {{ram_31_br_mask}, {ram_30_br_mask}, {ram_29_br_mask}, {ram_28_br_mask}, {ram_27_br_mask}, {ram_26_br_mask}, {ram_25_br_mask}, {ram_24_br_mask}, {ram_23_br_mask}, {ram_22_br_mask}, {ram_21_br_mask}, {ram_20_br_mask}, {ram_19_br_mask}, {ram_18_br_mask}, {ram_17_br_mask}, {ram_16_br_mask}, {ram_15_br_mask}, {ram_14_br_mask}, {ram_13_br_mask}, {ram_12_br_mask}, {ram_11_br_mask}, {ram_10_br_mask}, {ram_9_br_mask}, {ram_8_br_mask}, {ram_7_br_mask}, {ram_6_br_mask}, {ram_5_br_mask}, {ram_4_br_mask}, {ram_3_br_mask}, {ram_2_br_mask}, {ram_1_br_mask}, {ram_0_br_mask}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0] _GEN_13 = {{ram_31_cfi_is_call}, {ram_30_cfi_is_call}, {ram_29_cfi_is_call}, {ram_28_cfi_is_call}, {ram_27_cfi_is_call}, {ram_26_cfi_is_call}, {ram_25_cfi_is_call}, {ram_24_cfi_is_call}, {ram_23_cfi_is_call}, {ram_22_cfi_is_call}, {ram_21_cfi_is_call}, {ram_20_cfi_is_call}, {ram_19_cfi_is_call}, {ram_18_cfi_is_call}, {ram_17_cfi_is_call}, {ram_16_cfi_is_call}, {ram_15_cfi_is_call}, {ram_14_cfi_is_call}, {ram_13_cfi_is_call}, {ram_12_cfi_is_call}, {ram_11_cfi_is_call}, {ram_10_cfi_is_call}, {ram_9_cfi_is_call}, {ram_8_cfi_is_call}, {ram_7_cfi_is_call}, {ram_6_cfi_is_call}, {ram_5_cfi_is_call}, {ram_4_cfi_is_call}, {ram_3_cfi_is_call}, {ram_2_cfi_is_call}, {ram_1_cfi_is_call}, {ram_0_cfi_is_call}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0] _GEN_14 = {{ram_31_cfi_is_ret}, {ram_30_cfi_is_ret}, {ram_29_cfi_is_ret}, {ram_28_cfi_is_ret}, {ram_27_cfi_is_ret}, {ram_26_cfi_is_ret}, {ram_25_cfi_is_ret}, {ram_24_cfi_is_ret}, {ram_23_cfi_is_ret}, {ram_22_cfi_is_ret}, {ram_21_cfi_is_ret}, {ram_20_cfi_is_ret}, {ram_19_cfi_is_ret}, {ram_18_cfi_is_ret}, {ram_17_cfi_is_ret}, {ram_16_cfi_is_ret}, {ram_15_cfi_is_ret}, {ram_14_cfi_is_ret}, {ram_13_cfi_is_ret}, {ram_12_cfi_is_ret}, {ram_11_cfi_is_ret}, {ram_10_cfi_is_ret}, {ram_9_cfi_is_ret}, {ram_8_cfi_is_ret}, {ram_7_cfi_is_ret}, {ram_6_cfi_is_ret}, {ram_5_cfi_is_ret}, {ram_4_cfi_is_ret}, {ram_3_cfi_is_ret}, {ram_2_cfi_is_ret}, {ram_1_cfi_is_ret}, {ram_0_cfi_is_ret}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0] _GEN_15 = {{ram_31_cfi_npc_plus4}, {ram_30_cfi_npc_plus4}, {ram_29_cfi_npc_plus4}, {ram_28_cfi_npc_plus4}, {ram_27_cfi_npc_plus4}, {ram_26_cfi_npc_plus4}, {ram_25_cfi_npc_plus4}, {ram_24_cfi_npc_plus4}, {ram_23_cfi_npc_plus4}, {ram_22_cfi_npc_plus4}, {ram_21_cfi_npc_plus4}, {ram_20_cfi_npc_plus4}, {ram_19_cfi_npc_plus4}, {ram_18_cfi_npc_plus4}, {ram_17_cfi_npc_plus4}, {ram_16_cfi_npc_plus4}, {ram_15_cfi_npc_plus4}, {ram_14_cfi_npc_plus4}, {ram_13_cfi_npc_plus4}, {ram_12_cfi_npc_plus4}, {ram_11_cfi_npc_plus4}, {ram_10_cfi_npc_plus4}, {ram_9_cfi_npc_plus4}, {ram_8_cfi_npc_plus4}, {ram_7_cfi_npc_plus4}, {ram_6_cfi_npc_plus4}, {ram_5_cfi_npc_plus4}, {ram_4_cfi_npc_plus4}, {ram_3_cfi_npc_plus4}, {ram_2_cfi_npc_plus4}, {ram_1_cfi_npc_plus4}, {ram_0_cfi_npc_plus4}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0][39:0] _GEN_16 = {{ram_31_ras_top}, {ram_30_ras_top}, {ram_29_ras_top}, {ram_28_ras_top}, {ram_27_ras_top}, {ram_26_ras_top}, {ram_25_ras_top}, {ram_24_ras_top}, {ram_23_ras_top}, {ram_22_ras_top}, {ram_21_ras_top}, {ram_20_ras_top}, {ram_19_ras_top}, {ram_18_ras_top}, {ram_17_ras_top}, {ram_16_ras_top}, {ram_15_ras_top}, {ram_14_ras_top}, {ram_13_ras_top}, {ram_12_ras_top}, {ram_11_ras_top}, {ram_10_ras_top}, {ram_9_ras_top}, {ram_8_ras_top}, {ram_7_ras_top}, {ram_6_ras_top}, {ram_5_ras_top}, {ram_4_ras_top}, {ram_3_ras_top}, {ram_2_ras_top}, {ram_1_ras_top}, {ram_0_ras_top}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0][4:0] _GEN_17 = {{ram_31_ras_idx}, {ram_30_ras_idx}, {ram_29_ras_idx}, {ram_28_ras_idx}, {ram_27_ras_idx}, {ram_26_ras_idx}, {ram_25_ras_idx}, {ram_24_ras_idx}, {ram_23_ras_idx}, {ram_22_ras_idx}, {ram_21_ras_idx}, {ram_20_ras_idx}, {ram_19_ras_idx}, {ram_18_ras_idx}, {ram_17_ras_idx}, {ram_16_ras_idx}, {ram_15_ras_idx}, {ram_14_ras_idx}, {ram_13_ras_idx}, {ram_12_ras_idx}, {ram_11_ras_idx}, {ram_10_ras_idx}, {ram_9_ras_idx}, {ram_8_ras_idx}, {ram_7_ras_idx}, {ram_6_ras_idx}, {ram_5_ras_idx}, {ram_4_ras_idx}, {ram_3_ras_idx}, {ram_2_ras_idx}, {ram_1_ras_idx}, {ram_0_ras_idx}}; // @[fetch-target-queue.scala:130:21, :221:26]
wire [31:0] _GEN_18 = {{ram_31_start_bank}, {ram_30_start_bank}, {ram_29_start_bank}, {ram_28_start_bank}, {ram_27_start_bank}, {ram_26_start_bank}, {ram_25_start_bank}, {ram_24_start_bank}, {ram_23_start_bank}, {ram_22_start_bank}, {ram_21_start_bank}, {ram_20_start_bank}, {ram_19_start_bank}, {ram_18_start_bank}, {ram_17_start_bank}, {ram_16_start_bank}, {ram_15_start_bank}, {ram_14_start_bank}, {ram_13_start_bank}, {ram_12_start_bank}, {ram_11_start_bank}, {ram_10_start_bank}, {ram_9_start_bank}, {ram_8_start_bank}, {ram_7_start_bank}, {ram_6_start_bank}, {ram_5_start_bank}, {ram_4_start_bank}, {ram_3_start_bank}, {ram_2_start_bank}, {ram_1_start_bank}, {ram_0_start_bank}}; // @[fetch-target-queue.scala:130:21, :221:26]
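  // Note: _GEN_11 through _GEN_18 above flatten the per-slot ram_* fields into packed vectors so
  // that a single entry can be selected by index further below (e.g. by io_redirect_bits).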
reg [39:0] bpd_pc; // @[fetch-target-queue.scala:229:26]
assign io_bpdupdate_bits_pc_0 = bpd_pc; // @[fetch-target-queue.scala:82:7, :229:26]
wire [31:0][39:0] _GEN_19 = {{pcs_31}, {pcs_30}, {pcs_29}, {pcs_28}, {pcs_27}, {pcs_26}, {pcs_25}, {pcs_24}, {pcs_23}, {pcs_22}, {pcs_21}, {pcs_20}, {pcs_19}, {pcs_18}, {pcs_17}, {pcs_16}, {pcs_15}, {pcs_14}, {pcs_13}, {pcs_12}, {pcs_11}, {pcs_10}, {pcs_9}, {pcs_8}, {pcs_7}, {pcs_6}, {pcs_5}, {pcs_4}, {pcs_3}, {pcs_2}, {pcs_1}, {pcs_0}}; // @[fetch-target-queue.scala:128:21, :229:26]
wire [5:0] _bpd_target_T = {1'h0, bpd_idx} + 6'h1; // @[util.scala:211:14]
wire [4:0] _bpd_target_T_1 = _bpd_target_T[4:0]; // @[util.scala:211:14]
wire [4:0] _bpd_target_T_2 = _bpd_target_T_1; // @[util.scala:211:{14,20}]
reg [39:0] bpd_target; // @[fetch-target-queue.scala:230:27]
assign io_bpdupdate_bits_target_0 = bpd_target; // @[fetch-target-queue.scala:82:7, :230:27]
reg REG; // @[fetch-target-queue.scala:235:23]
reg [4:0] bpd_repair_idx_REG; // @[fetch-target-queue.scala:237:37]
reg [4:0] bpd_end_idx_REG; // @[fetch-target-queue.scala:238:37]
wire [5:0] _GEN_20 = {1'h0, bpd_repair_idx}; // @[util.scala:211:14]
wire [5:0] _GEN_21 = _GEN_20 + 6'h1; // @[util.scala:211:14]
wire [5:0] _bpd_repair_idx_T; // @[util.scala:211:14]
assign _bpd_repair_idx_T = _GEN_21; // @[util.scala:211:14]
wire [5:0] _bpd_repair_idx_T_3; // @[util.scala:211:14]
assign _bpd_repair_idx_T_3 = _GEN_21; // @[util.scala:211:14]
wire [4:0] _bpd_repair_idx_T_1 = _bpd_repair_idx_T[4:0]; // @[util.scala:211:14]
wire [4:0] _bpd_repair_idx_T_2 = _bpd_repair_idx_T_1; // @[util.scala:211:{14,20}]
reg REG_1; // @[fetch-target-queue.scala:243:44]
wire [4:0] _bpd_repair_idx_T_4 = _bpd_repair_idx_T_3[4:0]; // @[util.scala:211:14]
wire [4:0] _bpd_repair_idx_T_5 = _bpd_repair_idx_T_4; // @[util.scala:211:{14,20}]
wire [5:0] _bpd_repair_idx_T_6 = _GEN_20 + 6'h1; // @[util.scala:211:14]
wire [4:0] _bpd_repair_idx_T_7 = _bpd_repair_idx_T_6[4:0]; // @[util.scala:211:14]
wire [4:0] _bpd_repair_idx_T_8 = _bpd_repair_idx_T_7; // @[util.scala:211:{14,20}]
wire _do_commit_update_T = ~bpd_update_mispredict; // @[fetch-target-queue.scala:213:38, :256:31]
wire _do_commit_update_T_1 = ~bpd_update_repair; // @[fetch-target-queue.scala:214:34, :257:31]
wire _do_commit_update_T_2 = _do_commit_update_T & _do_commit_update_T_1; // @[fetch-target-queue.scala:256:{31,54}, :257:31]
wire _do_commit_update_T_3 = bpd_ptr != deq_ptr; // @[fetch-target-queue.scala:120:27, :121:27, :258:40]
wire _do_commit_update_T_4 = _do_commit_update_T_2 & _do_commit_update_T_3; // @[fetch-target-queue.scala:256:54, :257:50, :258:40]
wire [5:0] _GEN_22 = {1'h0, bpd_ptr} + 6'h1; // @[util.scala:211:14]
wire [5:0] _do_commit_update_T_5; // @[util.scala:211:14]
assign _do_commit_update_T_5 = _GEN_22; // @[util.scala:211:14]
wire [5:0] _bpd_ptr_T; // @[util.scala:211:14]
assign _bpd_ptr_T = _GEN_22; // @[util.scala:211:14]
wire [4:0] _do_commit_update_T_6 = _do_commit_update_T_5[4:0]; // @[util.scala:211:14]
wire [4:0] _do_commit_update_T_7 = _do_commit_update_T_6; // @[util.scala:211:{14,20}]
wire _do_commit_update_T_8 = enq_ptr != _do_commit_update_T_7; // @[util.scala:211:20]
wire _do_commit_update_T_9 = _do_commit_update_T_4 & _do_commit_update_T_8; // @[fetch-target-queue.scala:257:50, :258:52, :259:40]
wire _do_commit_update_T_10 = ~io_brupdate_b2_mispredict_0; // @[fetch-target-queue.scala:82:7, :260:31]
wire _do_commit_update_T_11 = _do_commit_update_T_9 & _do_commit_update_T_10; // @[fetch-target-queue.scala:258:52, :259:74, :260:31]
wire _do_commit_update_T_12 = ~io_redirect_valid_0; // @[fetch-target-queue.scala:82:7, :261:31]
wire _do_commit_update_T_13 = _do_commit_update_T_11 & _do_commit_update_T_12; // @[fetch-target-queue.scala:259:74, :260:58, :261:31]
reg do_commit_update_REG; // @[fetch-target-queue.scala:261:61]
wire _do_commit_update_T_14 = ~do_commit_update_REG; // @[fetch-target-queue.scala:261:{53,61}]
wire do_commit_update = _do_commit_update_T_13 & _do_commit_update_T_14; // @[fetch-target-queue.scala:260:58, :261:{50,53}]
reg REG_2; // @[fetch-target-queue.scala:265:16]
wire valid_repair = bpd_pc != bpd_repair_pc; // @[fetch-target-queue.scala:217:26, :229:26, :267:31]
wire _io_bpdupdate_valid_T = ~first_empty; // @[fetch-target-queue.scala:201:28, :269:28]
wire _io_bpdupdate_valid_T_1 = |bpd_entry_br_mask; // @[fetch-target-queue.scala:221:26, :270:74]
wire _io_bpdupdate_valid_T_2 = bpd_entry_cfi_idx_valid | _io_bpdupdate_valid_T_1; // @[fetch-target-queue.scala:221:26, :270:{53,74}]
wire _io_bpdupdate_valid_T_3 = _io_bpdupdate_valid_T & _io_bpdupdate_valid_T_2; // @[fetch-target-queue.scala:269:{28,41}, :270:53]
reg io_bpdupdate_valid_REG; // @[fetch-target-queue.scala:271:37]
wire _io_bpdupdate_valid_T_4 = ~valid_repair; // @[fetch-target-queue.scala:267:31, :271:59]
wire _io_bpdupdate_valid_T_5 = io_bpdupdate_valid_REG & _io_bpdupdate_valid_T_4; // @[fetch-target-queue.scala:271:{37,56,59}]
wire _io_bpdupdate_valid_T_6 = ~_io_bpdupdate_valid_T_5; // @[fetch-target-queue.scala:271:{28,56}]
wire _io_bpdupdate_valid_T_7 = _io_bpdupdate_valid_T_3 & _io_bpdupdate_valid_T_6; // @[fetch-target-queue.scala:269:41, :270:83, :271:28]
assign io_bpdupdate_valid_0 = REG_2 & _io_bpdupdate_valid_T_7; // @[fetch-target-queue.scala:82:7, :193:22, :265:{16,80}, :269:24, :270:83]
reg io_bpdupdate_bits_is_mispredict_update_REG; // @[fetch-target-queue.scala:272:54]
assign io_bpdupdate_bits_is_mispredict_update_0 = io_bpdupdate_bits_is_mispredict_update_REG; // @[fetch-target-queue.scala:82:7, :272:54]
reg io_bpdupdate_bits_is_repair_update_REG; // @[fetch-target-queue.scala:273:54]
assign io_bpdupdate_bits_is_repair_update_0 = io_bpdupdate_bits_is_repair_update_REG; // @[fetch-target-queue.scala:82:7, :273:54]
wire [7:0] _GEN_23 = {5'h0, bpd_entry_cfi_idx_bits}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T = 8'h1 << _GEN_23; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_1 = _io_bpdupdate_bits_br_mask_T; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_2 = {1'h0, _io_bpdupdate_bits_br_mask_T[7:1]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_3 = {2'h0, _io_bpdupdate_bits_br_mask_T[7:2]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_4 = {3'h0, _io_bpdupdate_bits_br_mask_T[7:3]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_5 = {4'h0, _io_bpdupdate_bits_br_mask_T[7:4]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_6 = {5'h0, _io_bpdupdate_bits_br_mask_T[7:5]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_7 = {6'h0, _io_bpdupdate_bits_br_mask_T[7:6]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_8 = {7'h0, _io_bpdupdate_bits_br_mask_T[7]}; // @[OneHot.scala:58:35]
wire [7:0] _io_bpdupdate_bits_br_mask_T_9 = _io_bpdupdate_bits_br_mask_T_1 | _io_bpdupdate_bits_br_mask_T_2; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_10 = _io_bpdupdate_bits_br_mask_T_9 | _io_bpdupdate_bits_br_mask_T_3; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_11 = _io_bpdupdate_bits_br_mask_T_10 | _io_bpdupdate_bits_br_mask_T_4; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_12 = _io_bpdupdate_bits_br_mask_T_11 | _io_bpdupdate_bits_br_mask_T_5; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_13 = _io_bpdupdate_bits_br_mask_T_12 | _io_bpdupdate_bits_br_mask_T_6; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_14 = _io_bpdupdate_bits_br_mask_T_13 | _io_bpdupdate_bits_br_mask_T_7; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_15 = _io_bpdupdate_bits_br_mask_T_14 | _io_bpdupdate_bits_br_mask_T_8; // @[util.scala:383:{29,45}]
wire [7:0] _io_bpdupdate_bits_br_mask_T_16 = _io_bpdupdate_bits_br_mask_T_15 & bpd_entry_br_mask; // @[util.scala:383:45]
assign _io_bpdupdate_bits_br_mask_T_17 = bpd_entry_cfi_idx_valid ? _io_bpdupdate_bits_br_mask_T_16 : bpd_entry_br_mask; // @[fetch-target-queue.scala:221:26, :276:37, :277:36]
assign io_bpdupdate_bits_br_mask_0 = _io_bpdupdate_bits_br_mask_T_17; // @[fetch-target-queue.scala:82:7, :276:37]
wire [7:0] _io_bpdupdate_bits_cfi_is_br_T = bpd_entry_br_mask >> _GEN_23; // @[OneHot.scala:58:35]
assign _io_bpdupdate_bits_cfi_is_br_T_1 = _io_bpdupdate_bits_cfi_is_br_T[0]; // @[fetch-target-queue.scala:282:54]
assign io_bpdupdate_bits_cfi_is_br_0 = _io_bpdupdate_bits_cfi_is_br_T_1; // @[fetch-target-queue.scala:82:7, :282:54]
wire _io_bpdupdate_bits_cfi_is_jal_T = bpd_entry_cfi_type == 3'h2; // @[fetch-target-queue.scala:221:26, :283:56]
wire _io_bpdupdate_bits_cfi_is_jal_T_1 = bpd_entry_cfi_type == 3'h3; // @[fetch-target-queue.scala:221:26, :283:90]
assign _io_bpdupdate_bits_cfi_is_jal_T_2 = _io_bpdupdate_bits_cfi_is_jal_T | _io_bpdupdate_bits_cfi_is_jal_T_1; // @[fetch-target-queue.scala:283:{56,68,90}]
assign io_bpdupdate_bits_cfi_is_jal_0 = _io_bpdupdate_bits_cfi_is_jal_T_2; // @[fetch-target-queue.scala:82:7, :283:68]
wire [4:0] _bpd_ptr_T_1 = _bpd_ptr_T[4:0]; // @[util.scala:211:14]
wire [4:0] _bpd_ptr_T_2 = _bpd_ptr_T_1; // @[util.scala:211:{14,20}]
wire _io_enq_ready_T = ~full; // @[fetch-target-queue.scala:124:81, :295:27]
wire _io_enq_ready_T_1 = _io_enq_ready_T | do_commit_update; // @[fetch-target-queue.scala:261:50, :295:{27,33}]
reg io_enq_ready_REG; // @[fetch-target-queue.scala:295:26]
assign io_enq_ready_0 = io_enq_ready_REG; // @[fetch-target-queue.scala:82:7, :295:26]
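  // Redirect path: rebuild the entry addressed by io_redirect_bits; when the redirect comes from a
  // mispredicted branch (_GEN_24), the CFI fields are patched from io_brupdate_b2_*, otherwise the
  // stored ram_* values are passed through unchanged.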
wire redirect_new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:299:36]
wire [2:0] redirect_new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:299:36]
wire redirect_new_entry_cfi_taken; // @[fetch-target-queue.scala:299:36]
wire redirect_new_entry_cfi_mispredicted; // @[fetch-target-queue.scala:299:36]
wire [2:0] redirect_new_entry_cfi_type; // @[fetch-target-queue.scala:299:36]
wire [7:0] redirect_new_entry_br_mask; // @[fetch-target-queue.scala:299:36]
wire redirect_new_entry_cfi_is_call; // @[fetch-target-queue.scala:299:36]
wire redirect_new_entry_cfi_is_ret; // @[fetch-target-queue.scala:299:36]
wire redirect_new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:299:36]
wire [39:0] redirect_new_entry_ras_top; // @[fetch-target-queue.scala:299:36]
wire [4:0] redirect_new_entry_ras_idx; // @[fetch-target-queue.scala:299:36]
wire redirect_new_entry_start_bank; // @[fetch-target-queue.scala:299:36]
assign redirect_new_entry_cfi_type = _GEN_11[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36]
assign redirect_new_entry_br_mask = _GEN_12[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36]
assign redirect_new_entry_cfi_npc_plus4 = _GEN_15[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36]
assign redirect_new_entry_ras_top = _GEN_16[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36]
assign redirect_new_entry_ras_idx = _GEN_17[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36]
assign redirect_new_entry_start_bank = _GEN_18[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36]
wire _new_cfi_idx_T = _GEN_18[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :306:37]
wire [5:0] _enq_ptr_T_3 = {1'h0, io_redirect_bits_0} + 6'h1; // @[util.scala:211:14]
wire [4:0] _enq_ptr_T_4 = _enq_ptr_T_3[4:0]; // @[util.scala:211:14]
wire [4:0] _enq_ptr_T_5 = _enq_ptr_T_4; // @[util.scala:211:{14,20}]
wire [3:0] _new_cfi_idx_T_2 = {_new_cfi_idx_T, 3'h0}; // @[fetch-target-queue.scala:306:{10,37}]
wire [5:0] _new_cfi_idx_T_3 = {io_brupdate_b2_uop_pc_lob_0[5:4], io_brupdate_b2_uop_pc_lob_0[3:0] ^ _new_cfi_idx_T_2}; // @[fetch-target-queue.scala:82:7, :305:50, :306:10]
wire [2:0] new_cfi_idx = _new_cfi_idx_T_3[3:1]; // @[fetch-target-queue.scala:305:50, :306:79]
wire _GEN_24 = io_redirect_valid_0 & io_brupdate_b2_mispredict_0; // @[fetch-target-queue.scala:82:7, :299:36, :301:28, :304:38, :307:43]
assign redirect_new_entry_cfi_idx_valid = _GEN_24 | _GEN_7[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :301:28, :304:38, :307:43]
assign redirect_new_entry_cfi_idx_bits = _GEN_24 ? new_cfi_idx : _GEN_8[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :301:28, :304:38, :306:79, :307:43, :308:43]
assign redirect_new_entry_cfi_mispredicted = _GEN_24 | _GEN_10[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :301:28, :304:38, :307:43, :309:43]
assign redirect_new_entry_cfi_taken = _GEN_24 ? io_brupdate_b2_taken_0 : _GEN_9[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :301:28, :304:38, :307:43, :310:43]
wire _GEN_25 = _GEN_8[io_redirect_bits_0] == new_cfi_idx; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :306:79, :311:104]
wire _redirect_new_entry_cfi_is_call_T; // @[fetch-target-queue.scala:311:104]
assign _redirect_new_entry_cfi_is_call_T = _GEN_25; // @[fetch-target-queue.scala:311:104]
wire _redirect_new_entry_cfi_is_ret_T; // @[fetch-target-queue.scala:312:104]
assign _redirect_new_entry_cfi_is_ret_T = _GEN_25; // @[fetch-target-queue.scala:311:104, :312:104]
wire _redirect_new_entry_cfi_is_call_T_1 = _GEN_13[io_redirect_bits_0] & _redirect_new_entry_cfi_is_call_T; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :311:{73,104}]
assign redirect_new_entry_cfi_is_call = _GEN_24 ? _redirect_new_entry_cfi_is_call_T_1 : _GEN_13[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :301:28, :304:38, :307:43, :311:{43,73}]
wire _redirect_new_entry_cfi_is_ret_T_1 = _GEN_14[io_redirect_bits_0] & _redirect_new_entry_cfi_is_ret_T; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :312:{73,104}]
assign redirect_new_entry_cfi_is_ret = _GEN_24 ? _redirect_new_entry_cfi_is_ret_T_1 : _GEN_14[io_redirect_bits_0]; // @[fetch-target-queue.scala:82:7, :221:26, :299:36, :301:28, :304:38, :307:43, :312:{43,73}]
assign ras_update_pc = io_redirect_valid_0 ? redirect_new_entry_ras_top : 40'h0; // @[fetch-target-queue.scala:82:7, :207:31, :299:36, :301:28, :316:20]
assign ras_update_idx = io_redirect_valid_0 ? redirect_new_entry_ras_idx : 5'h0; // @[fetch-target-queue.scala:82:7, :208:32, :299:36, :301:28, :317:20]
reg REG_3; // @[fetch-target-queue.scala:319:23]
reg prev_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:320:26]
reg [2:0] prev_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:320:26]
reg prev_entry_REG_cfi_taken; // @[fetch-target-queue.scala:320:26]
reg prev_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:320:26]
reg [2:0] prev_entry_REG_cfi_type; // @[fetch-target-queue.scala:320:26]
reg [7:0] prev_entry_REG_br_mask; // @[fetch-target-queue.scala:320:26]
reg prev_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:320:26]
reg prev_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:320:26]
reg prev_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:320:26]
reg [39:0] prev_entry_REG_ras_top; // @[fetch-target-queue.scala:320:26]
reg [4:0] prev_entry_REG_ras_idx; // @[fetch-target-queue.scala:320:26]
reg prev_entry_REG_start_bank; // @[fetch-target-queue.scala:320:26]
reg [4:0] REG_4; // @[fetch-target-queue.scala:324:16]
reg ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:324:46]
reg [2:0] ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:324:46]
reg ram_REG_cfi_taken; // @[fetch-target-queue.scala:324:46]
reg ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:324:46]
reg [2:0] ram_REG_cfi_type; // @[fetch-target-queue.scala:324:46]
reg [7:0] ram_REG_br_mask; // @[fetch-target-queue.scala:324:46]
reg ram_REG_cfi_is_call; // @[fetch-target-queue.scala:324:46]
reg ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:324:46]
reg ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:324:46]
reg [39:0] ram_REG_ras_top; // @[fetch-target-queue.scala:324:46]
reg [4:0] ram_REG_ras_idx; // @[fetch-target-queue.scala:324:46]
reg ram_REG_start_bank; // @[fetch-target-queue.scala:324:46]
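  // FTQ read ports: each of the three io_rrd_ftq_resps channels reads pcs_*/ram_* at the requested
  // index (io_arb_ftq_reqs_*), bypassing io_enq_bits when the requested slot is the one being
  // enqueued this cycle (is_enq*); results are registered one cycle before driving the outputs.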
wire [4:0] idx = _idx_T ? 5'h0 : io_arb_ftq_reqs_0_0; // @[fetch-target-queue.scala:82:7, :332:{18,25}]
wire [4:0] _io_rrd_ftq_resps_0_ghist_WIRE = idx; // @[fetch-target-queue.scala:332:18, :338:51]
wire _is_enq_T = idx == enq_ptr; // @[fetch-target-queue.scala:122:27, :332:18, :333:23]
wire is_enq = _is_enq_T & _is_enq_T_1; // @[Decoupled.scala:51:35]
reg io_rrd_ftq_resps_0_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_idx_valid_0 = io_rrd_ftq_resps_0_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:82:7, :336:45]
reg [2:0] io_rrd_ftq_resps_0_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_idx_bits_0 = io_rrd_ftq_resps_0_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_0_entry_REG_cfi_taken; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_taken_0 = io_rrd_ftq_resps_0_entry_REG_cfi_taken; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_0_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_mispredicted_0 = io_rrd_ftq_resps_0_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:82:7, :336:45]
reg [2:0] io_rrd_ftq_resps_0_entry_REG_cfi_type; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_type_0 = io_rrd_ftq_resps_0_entry_REG_cfi_type; // @[fetch-target-queue.scala:82:7, :336:45]
reg [7:0] io_rrd_ftq_resps_0_entry_REG_br_mask; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_br_mask_0 = io_rrd_ftq_resps_0_entry_REG_br_mask; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_0_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_is_call_0 = io_rrd_ftq_resps_0_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_0_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_is_ret_0 = io_rrd_ftq_resps_0_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_0_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_cfi_npc_plus4_0 = io_rrd_ftq_resps_0_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:82:7, :336:45]
reg [39:0] io_rrd_ftq_resps_0_entry_REG_ras_top; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_ras_top_0 = io_rrd_ftq_resps_0_entry_REG_ras_top; // @[fetch-target-queue.scala:82:7, :336:45]
reg [4:0] io_rrd_ftq_resps_0_entry_REG_ras_idx; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_ras_idx_0 = io_rrd_ftq_resps_0_entry_REG_ras_idx; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_0_entry_REG_start_bank; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_0_entry_start_bank_0 = io_rrd_ftq_resps_0_entry_REG_start_bank; // @[fetch-target-queue.scala:82:7, :336:45]
wire [39:0] _io_rrd_ftq_resps_0_pc_T = is_enq ? io_enq_bits_pc_0 : _GEN_19[idx]; // @[fetch-target-queue.scala:82:7, :229:26, :332:18, :333:36, :342:49]
reg [39:0] io_rrd_ftq_resps_0_pc_REG; // @[fetch-target-queue.scala:342:45]
assign io_rrd_ftq_resps_0_pc_0 = io_rrd_ftq_resps_0_pc_REG; // @[fetch-target-queue.scala:82:7, :342:45]
wire _io_rrd_ftq_resps_0_valid_T = idx != enq_ptr; // @[fetch-target-queue.scala:122:27, :332:18, :343:50]
wire _io_rrd_ftq_resps_0_valid_T_1 = _io_rrd_ftq_resps_0_valid_T | is_enq; // @[fetch-target-queue.scala:333:36, :343:{50,62}]
reg io_rrd_ftq_resps_0_valid_REG; // @[fetch-target-queue.scala:343:45]
assign io_rrd_ftq_resps_0_valid_0 = io_rrd_ftq_resps_0_valid_REG; // @[fetch-target-queue.scala:82:7, :343:45]
wire [4:0] idx_1 = _idx_T_1 ? 5'h0 : io_arb_ftq_reqs_1_0; // @[fetch-target-queue.scala:82:7, :332:{18,25}]
wire _is_enq_T_2 = idx_1 == enq_ptr; // @[fetch-target-queue.scala:122:27, :332:18, :333:23]
wire is_enq_1 = _is_enq_T_2 & _is_enq_T_3; // @[Decoupled.scala:51:35]
reg io_rrd_ftq_resps_1_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_idx_valid_0 = io_rrd_ftq_resps_1_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:82:7, :336:45]
reg [2:0] io_rrd_ftq_resps_1_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_idx_bits_0 = io_rrd_ftq_resps_1_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_1_entry_REG_cfi_taken; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_taken_0 = io_rrd_ftq_resps_1_entry_REG_cfi_taken; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_1_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_mispredicted_0 = io_rrd_ftq_resps_1_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:82:7, :336:45]
reg [2:0] io_rrd_ftq_resps_1_entry_REG_cfi_type; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_type_0 = io_rrd_ftq_resps_1_entry_REG_cfi_type; // @[fetch-target-queue.scala:82:7, :336:45]
reg [7:0] io_rrd_ftq_resps_1_entry_REG_br_mask; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_br_mask_0 = io_rrd_ftq_resps_1_entry_REG_br_mask; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_1_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_is_call_0 = io_rrd_ftq_resps_1_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_1_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_is_ret_0 = io_rrd_ftq_resps_1_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_1_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_cfi_npc_plus4_0 = io_rrd_ftq_resps_1_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:82:7, :336:45]
reg [39:0] io_rrd_ftq_resps_1_entry_REG_ras_top; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_ras_top_0 = io_rrd_ftq_resps_1_entry_REG_ras_top; // @[fetch-target-queue.scala:82:7, :336:45]
reg [4:0] io_rrd_ftq_resps_1_entry_REG_ras_idx; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_ras_idx_0 = io_rrd_ftq_resps_1_entry_REG_ras_idx; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_1_entry_REG_start_bank; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_1_entry_start_bank_0 = io_rrd_ftq_resps_1_entry_REG_start_bank; // @[fetch-target-queue.scala:82:7, :336:45]
wire [39:0] _io_rrd_ftq_resps_1_pc_T = is_enq_1 ? io_enq_bits_pc_0 : _GEN_19[idx_1]; // @[fetch-target-queue.scala:82:7, :229:26, :332:18, :333:36, :342:49]
reg [39:0] io_rrd_ftq_resps_1_pc_REG; // @[fetch-target-queue.scala:342:45]
assign io_rrd_ftq_resps_1_pc_0 = io_rrd_ftq_resps_1_pc_REG; // @[fetch-target-queue.scala:82:7, :342:45]
wire _io_rrd_ftq_resps_1_valid_T = idx_1 != enq_ptr; // @[fetch-target-queue.scala:122:27, :332:18, :343:50]
wire _io_rrd_ftq_resps_1_valid_T_1 = _io_rrd_ftq_resps_1_valid_T | is_enq_1; // @[fetch-target-queue.scala:333:36, :343:{50,62}]
reg io_rrd_ftq_resps_1_valid_REG; // @[fetch-target-queue.scala:343:45]
assign io_rrd_ftq_resps_1_valid_0 = io_rrd_ftq_resps_1_valid_REG; // @[fetch-target-queue.scala:82:7, :343:45]
wire [4:0] idx_2 = _idx_T_2 ? 5'h0 : io_arb_ftq_reqs_2_0; // @[fetch-target-queue.scala:82:7, :332:{18,25}]
wire _is_enq_T_4 = idx_2 == enq_ptr; // @[fetch-target-queue.scala:122:27, :332:18, :333:23]
wire is_enq_2 = _is_enq_T_4 & _is_enq_T_5; // @[Decoupled.scala:51:35]
reg io_rrd_ftq_resps_2_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_idx_valid_0 = io_rrd_ftq_resps_2_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:82:7, :336:45]
reg [2:0] io_rrd_ftq_resps_2_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_idx_bits_0 = io_rrd_ftq_resps_2_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_2_entry_REG_cfi_taken; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_taken_0 = io_rrd_ftq_resps_2_entry_REG_cfi_taken; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_2_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_mispredicted_0 = io_rrd_ftq_resps_2_entry_REG_cfi_mispredicted; // @[fetch-target-queue.scala:82:7, :336:45]
reg [2:0] io_rrd_ftq_resps_2_entry_REG_cfi_type; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_type_0 = io_rrd_ftq_resps_2_entry_REG_cfi_type; // @[fetch-target-queue.scala:82:7, :336:45]
reg [7:0] io_rrd_ftq_resps_2_entry_REG_br_mask; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_br_mask_0 = io_rrd_ftq_resps_2_entry_REG_br_mask; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_2_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_is_call_0 = io_rrd_ftq_resps_2_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_2_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_is_ret_0 = io_rrd_ftq_resps_2_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_2_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_cfi_npc_plus4_0 = io_rrd_ftq_resps_2_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:82:7, :336:45]
reg [39:0] io_rrd_ftq_resps_2_entry_REG_ras_top; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_ras_top_0 = io_rrd_ftq_resps_2_entry_REG_ras_top; // @[fetch-target-queue.scala:82:7, :336:45]
reg [4:0] io_rrd_ftq_resps_2_entry_REG_ras_idx; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_ras_idx_0 = io_rrd_ftq_resps_2_entry_REG_ras_idx; // @[fetch-target-queue.scala:82:7, :336:45]
reg io_rrd_ftq_resps_2_entry_REG_start_bank; // @[fetch-target-queue.scala:336:45]
assign io_rrd_ftq_resps_2_entry_start_bank_0 = io_rrd_ftq_resps_2_entry_REG_start_bank; // @[fetch-target-queue.scala:82:7, :336:45]
wire [39:0] _io_rrd_ftq_resps_2_pc_T = is_enq_2 ? io_enq_bits_pc_0 : _GEN_19[idx_2]; // @[fetch-target-queue.scala:82:7, :229:26, :332:18, :333:36, :342:49]
reg [39:0] io_rrd_ftq_resps_2_pc_REG; // @[fetch-target-queue.scala:342:45]
assign io_rrd_ftq_resps_2_pc_0 = io_rrd_ftq_resps_2_pc_REG; // @[fetch-target-queue.scala:82:7, :342:45]
wire _io_rrd_ftq_resps_2_valid_T = idx_2 != enq_ptr; // @[fetch-target-queue.scala:122:27, :332:18, :343:50]
wire _io_rrd_ftq_resps_2_valid_T_1 = _io_rrd_ftq_resps_2_valid_T | is_enq_2; // @[fetch-target-queue.scala:333:36, :343:{50,62}]
reg io_rrd_ftq_resps_2_valid_REG; // @[fetch-target-queue.scala:343:45]
assign io_rrd_ftq_resps_2_valid_0 = io_rrd_ftq_resps_2_valid_REG; // @[fetch-target-queue.scala:82:7, :343:45]
reg [39:0] io_com_pc_REG; // @[fetch-target-queue.scala:346:23]
assign io_com_pc_0 = io_com_pc_REG; // @[fetch-target-queue.scala:82:7, :346:23]
reg [39:0] io_debug_fetch_pc_0_REG; // @[fetch-target-queue.scala:349:36]
assign io_debug_fetch_pc_0_0 = io_debug_fetch_pc_0_REG; // @[fetch-target-queue.scala:82:7, :349:36]
reg [39:0] io_debug_fetch_pc_1_REG; // @[fetch-target-queue.scala:349:36]
assign io_debug_fetch_pc_1_0 = io_debug_fetch_pc_1_REG; // @[fetch-target-queue.scala:82:7, :349:36]
reg [39:0] io_debug_fetch_pc_2_REG; // @[fetch-target-queue.scala:349:36]
assign io_debug_fetch_pc_2_0 = io_debug_fetch_pc_2_REG; // @[fetch-target-queue.scala:82:7, :349:36]
wire _GEN_26 = io_redirect_valid_0 | ~REG_3; // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}]
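  // Per-slot enqueue write enables: _GEN_27 through _GEN_58 assert when an accepted enqueue
  // (do_enq) targets FTQ slot 0 through 31 respectively.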
wire _GEN_27 = do_enq & enq_ptr == 5'h0; // @[Decoupled.scala:51:35]
wire _GEN_28 = do_enq & enq_ptr == 5'h1; // @[Decoupled.scala:51:35]
wire _GEN_29 = do_enq & enq_ptr == 5'h2; // @[Decoupled.scala:51:35]
wire _GEN_30 = do_enq & enq_ptr == 5'h3; // @[Decoupled.scala:51:35]
wire _GEN_31 = do_enq & enq_ptr == 5'h4; // @[Decoupled.scala:51:35]
wire _GEN_32 = do_enq & enq_ptr == 5'h5; // @[Decoupled.scala:51:35]
wire _GEN_33 = do_enq & enq_ptr == 5'h6; // @[Decoupled.scala:51:35]
wire _GEN_34 = do_enq & enq_ptr == 5'h7; // @[Decoupled.scala:51:35]
wire _GEN_35 = do_enq & enq_ptr == 5'h8; // @[Decoupled.scala:51:35]
wire _GEN_36 = do_enq & enq_ptr == 5'h9; // @[Decoupled.scala:51:35]
wire _GEN_37 = do_enq & enq_ptr == 5'hA; // @[Decoupled.scala:51:35]
wire _GEN_38 = do_enq & enq_ptr == 5'hB; // @[Decoupled.scala:51:35]
wire _GEN_39 = do_enq & enq_ptr == 5'hC; // @[Decoupled.scala:51:35]
wire _GEN_40 = do_enq & enq_ptr == 5'hD; // @[Decoupled.scala:51:35]
wire _GEN_41 = do_enq & enq_ptr == 5'hE; // @[Decoupled.scala:51:35]
wire _GEN_42 = do_enq & enq_ptr == 5'hF; // @[Decoupled.scala:51:35]
wire _GEN_43 = do_enq & enq_ptr == 5'h10; // @[Decoupled.scala:51:35]
wire _GEN_44 = do_enq & enq_ptr == 5'h11; // @[Decoupled.scala:51:35]
wire _GEN_45 = do_enq & enq_ptr == 5'h12; // @[Decoupled.scala:51:35]
wire _GEN_46 = do_enq & enq_ptr == 5'h13; // @[Decoupled.scala:51:35]
wire _GEN_47 = do_enq & enq_ptr == 5'h14; // @[Decoupled.scala:51:35]
wire _GEN_48 = do_enq & enq_ptr == 5'h15; // @[Decoupled.scala:51:35]
wire _GEN_49 = do_enq & enq_ptr == 5'h16; // @[Decoupled.scala:51:35]
wire _GEN_50 = do_enq & enq_ptr == 5'h17; // @[Decoupled.scala:51:35]
wire _GEN_51 = do_enq & enq_ptr == 5'h18; // @[Decoupled.scala:51:35]
wire _GEN_52 = do_enq & enq_ptr == 5'h19; // @[Decoupled.scala:51:35]
wire _GEN_53 = do_enq & enq_ptr == 5'h1A; // @[Decoupled.scala:51:35]
wire _GEN_54 = do_enq & enq_ptr == 5'h1B; // @[Decoupled.scala:51:35]
wire _GEN_55 = do_enq & enq_ptr == 5'h1C; // @[Decoupled.scala:51:35]
wire _GEN_56 = do_enq & enq_ptr == 5'h1D; // @[Decoupled.scala:51:35]
wire _GEN_57 = do_enq & enq_ptr == 5'h1E; // @[Decoupled.scala:51:35]
wire _GEN_58 = do_enq & (&enq_ptr); // @[Decoupled.scala:51:35]
wire _T = bpd_update_repair & REG_1; // @[fetch-target-queue.scala:214:34, :243:{34,44}]
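  // Per-slot write-path selects: _GEN_59 through _GEN_90 choose, for each slot, between the normal
  // enqueue write and the one-cycle-delayed write-back of the ram_REG_* values (taken when REG_3 is
  // set, REG_4 matches the slot index, and no redirect is in flight).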
wire _GEN_59 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h0); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_60 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h1); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_61 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h2); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_62 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h3); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_63 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h4); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_64 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h5); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_65 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h6); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_66 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h7); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_67 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h8); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_68 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h9); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_69 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'hA); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_70 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'hB); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_71 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'hC); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_72 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'hD); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_73 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'hE); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_74 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'hF); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_75 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h10); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_76 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h11); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_77 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h12); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_78 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h13); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_79 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h14); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_80 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h15); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_81 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h16); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_82 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h17); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_83 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h18); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_84 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h19); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_85 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h1A); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_86 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h1B); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_87 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h1C); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_88 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h1D); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
wire _GEN_89 = io_redirect_valid_0 | ~(REG_3 & REG_4 == 5'h1E); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
  wire _GEN_90 = io_redirect_valid_0 | ~(REG_3 & (&REG_4)); // @[fetch-target-queue.scala:82:7, :145:17, :301:28, :319:{23,44}, :324:{16,36}]
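  // Sequential update: synchronous reset initializes the queue pointers and prev_* shadow state;
  // otherwise the pointers, prev_* entry, pcs_* array and per-slot ram_* entries are updated
  // according to the enables computed above.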
always @(posedge clock) begin // @[fetch-target-queue.scala:82:7]
if (reset) begin // @[fetch-target-queue.scala:82:7]
bpd_ptr <= 5'h0; // @[fetch-target-queue.scala:120:27]
deq_ptr <= 5'h0; // @[fetch-target-queue.scala:121:27]
enq_ptr <= 5'h1; // @[fetch-target-queue.scala:122:27]
prev_ghist_old_history <= 64'h0; // @[fetch-target-queue.scala:142:27]
prev_ghist_current_saw_branch_not_taken <= 1'h0; // @[fetch-target-queue.scala:142:27]
prev_ghist_new_saw_branch_not_taken <= 1'h0; // @[fetch-target-queue.scala:142:27]
prev_ghist_new_saw_branch_taken <= 1'h0; // @[fetch-target-queue.scala:142:27]
prev_ghist_ras_idx <= 5'h0; // @[fetch-target-queue.scala:142:27]
prev_entry_cfi_idx_valid <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_idx_bits <= 3'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_taken <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_mispredicted <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_type <= 3'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_br_mask <= 8'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_is_call <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_is_ret <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_cfi_npc_plus4 <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_ras_top <= 40'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_ras_idx <= 5'h0; // @[fetch-target-queue.scala:143:27]
prev_entry_start_bank <= 1'h0; // @[fetch-target-queue.scala:143:27]
prev_pc <= 40'h0; // @[fetch-target-queue.scala:144:27]
first_empty <= 1'h1; // @[fetch-target-queue.scala:201:28]
bpd_update_mispredict <= 1'h0; // @[fetch-target-queue.scala:213:38]
bpd_update_repair <= 1'h0; // @[fetch-target-queue.scala:214:34]
end
else begin // @[fetch-target-queue.scala:82:7]
if (do_commit_update) // @[fetch-target-queue.scala:261:50]
bpd_ptr <= _bpd_ptr_T_2; // @[util.scala:211:20]
if (io_deq_valid_0) // @[fetch-target-queue.scala:82:7]
deq_ptr <= io_deq_bits_0; // @[fetch-target-queue.scala:82:7, :121:27]
if (io_redirect_valid_0) // @[fetch-target-queue.scala:82:7]
enq_ptr <= _enq_ptr_T_5; // @[util.scala:211:20]
else if (do_enq) // @[Decoupled.scala:51:35]
enq_ptr <= _enq_ptr_T_2; // @[util.scala:211:20]
if (_GEN_26) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (do_enq) begin // @[Decoupled.scala:51:35]
prev_ghist_old_history <= new_ghist_old_history; // @[fetch-target-queue.scala:142:27, :165:24]
prev_ghist_current_saw_branch_not_taken <= new_ghist_current_saw_branch_not_taken; // @[fetch-target-queue.scala:142:27, :165:24]
prev_ghist_new_saw_branch_not_taken <= new_ghist_new_saw_branch_not_taken; // @[fetch-target-queue.scala:142:27, :165:24]
prev_ghist_new_saw_branch_taken <= new_ghist_new_saw_branch_taken; // @[fetch-target-queue.scala:142:27, :165:24]
prev_ghist_ras_idx <= new_ghist_ras_idx; // @[fetch-target-queue.scala:142:27, :165:24]
prev_entry_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:143:27, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
prev_ghist_old_history <= io_bpdupdate_bits_ghist_old_history_0; // @[fetch-target-queue.scala:82:7, :142:27]
prev_ghist_current_saw_branch_not_taken <= io_bpdupdate_bits_ghist_current_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7, :142:27]
prev_ghist_new_saw_branch_not_taken <= io_bpdupdate_bits_ghist_new_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7, :142:27]
prev_ghist_new_saw_branch_taken <= io_bpdupdate_bits_ghist_new_saw_branch_taken_0; // @[fetch-target-queue.scala:82:7, :142:27]
prev_ghist_ras_idx <= io_bpdupdate_bits_ghist_ras_idx_0; // @[fetch-target-queue.scala:82:7, :142:27]
prev_entry_cfi_idx_valid <= prev_entry_REG_cfi_idx_valid; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_cfi_idx_bits <= prev_entry_REG_cfi_idx_bits; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_cfi_taken <= prev_entry_REG_cfi_taken; // @[fetch-target-queue.scala:143:27, :320:26]
end
prev_entry_cfi_mispredicted <= _GEN_26 ? ~do_enq & prev_entry_cfi_mispredicted : prev_entry_REG_cfi_mispredicted; // @[Decoupled.scala:51:35]
if (_GEN_26) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (do_enq) begin // @[Decoupled.scala:51:35]
prev_entry_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:143:27, :149:25]
prev_entry_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:143:27, :149:25]
prev_pc <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :144:27]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
prev_entry_cfi_type <= prev_entry_REG_cfi_type; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_br_mask <= prev_entry_REG_br_mask; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_cfi_is_call <= prev_entry_REG_cfi_is_call; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_cfi_is_ret <= prev_entry_REG_cfi_is_ret; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_cfi_npc_plus4 <= prev_entry_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_ras_top <= prev_entry_REG_ras_top; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_ras_idx <= prev_entry_REG_ras_idx; // @[fetch-target-queue.scala:143:27, :320:26]
prev_entry_start_bank <= prev_entry_REG_start_bank; // @[fetch-target-queue.scala:143:27, :320:26]
prev_pc <= bpd_pc; // @[fetch-target-queue.scala:144:27, :229:26]
end
first_empty <= ~REG_2 & first_empty; // @[fetch-target-queue.scala:201:28, :265:{16,80}, :288:17]
bpd_update_mispredict <= ~io_redirect_valid_0 & REG; // @[fetch-target-queue.scala:82:7, :213:38, :232:28, :233:27, :235:{23,52}]
bpd_update_repair <= ~io_redirect_valid_0 & (REG ? bpd_update_repair : bpd_update_mispredict | (_T | ~(bpd_update_repair & (_bpd_repair_idx_T_6[4:0] == bpd_end_idx | bpd_pc == bpd_repair_pc))) & bpd_update_repair); // @[util.scala:211:14]
end
if (_GEN_27) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_0 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_28) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_1 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_29) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_2 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_30) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_3 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_31) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_4 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_32) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_5 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_33) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_6 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_34) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_7 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_35) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_8 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_36) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_9 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_37) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_10 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_38) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_11 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_39) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_12 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_40) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_13 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_41) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_14 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_42) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_15 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_43) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_16 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_44) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_17 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_45) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_18 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_46) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_19 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_47) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_20 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_48) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_21 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_49) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_22 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_50) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_23 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_51) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_24 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_52) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_25 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_53) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_26 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_54) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_27 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_55) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_28 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_56) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_29 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_57) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_30 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_58) // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
pcs_31 <= io_enq_bits_pc_0; // @[fetch-target-queue.scala:82:7, :128:21]
if (_GEN_59) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_27) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_0_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_0_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_0_cfi_mispredicted <= _GEN_59 ? ~_GEN_27 & ram_0_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_59) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_27) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_0_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_0_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_0_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_0_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_60) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_28) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_1_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_1_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_1_cfi_mispredicted <= _GEN_60 ? ~_GEN_28 & ram_1_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_60) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_28) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_1_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_1_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_1_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_1_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_61) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_29) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_2_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_2_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_2_cfi_mispredicted <= _GEN_61 ? ~_GEN_29 & ram_2_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_61) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_29) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_2_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_2_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_2_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_2_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_62) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_30) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_3_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_3_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_3_cfi_mispredicted <= _GEN_62 ? ~_GEN_30 & ram_3_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_62) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_30) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_3_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_3_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_3_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_3_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_63) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_31) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_4_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_4_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_4_cfi_mispredicted <= _GEN_63 ? ~_GEN_31 & ram_4_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_63) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_31) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_4_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_4_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_4_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_4_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_64) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_32) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_5_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_5_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_5_cfi_mispredicted <= _GEN_64 ? ~_GEN_32 & ram_5_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_64) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_32) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_5_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_5_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_5_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_5_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_65) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_33) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_6_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_6_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_6_cfi_mispredicted <= _GEN_65 ? ~_GEN_33 & ram_6_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_65) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_33) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_6_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_6_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_6_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_6_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_66) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_34) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_7_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_7_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_7_cfi_mispredicted <= _GEN_66 ? ~_GEN_34 & ram_7_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_66) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_34) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_7_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_7_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_7_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_7_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_67) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_35) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_8_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_8_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_8_cfi_mispredicted <= _GEN_67 ? ~_GEN_35 & ram_8_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_67) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_35) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_8_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_8_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_8_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_8_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_68) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_36) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_9_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_9_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_9_cfi_mispredicted <= _GEN_68 ? ~_GEN_36 & ram_9_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_68) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_36) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_9_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_9_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_9_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_9_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_69) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_37) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_10_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_10_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_10_cfi_mispredicted <= _GEN_69 ? ~_GEN_37 & ram_10_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_69) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_37) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_10_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_10_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_10_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_10_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_70) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_38) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_11_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_11_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_11_cfi_mispredicted <= _GEN_70 ? ~_GEN_38 & ram_11_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_70) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_38) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_11_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_11_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_11_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_11_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_71) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_39) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_12_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_12_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_12_cfi_mispredicted <= _GEN_71 ? ~_GEN_39 & ram_12_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_71) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_39) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_12_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_12_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_12_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_12_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_72) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_40) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_13_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_13_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_13_cfi_mispredicted <= _GEN_72 ? ~_GEN_40 & ram_13_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_72) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_40) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_13_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_13_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_13_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_13_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_73) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_41) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_14_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_14_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_14_cfi_mispredicted <= _GEN_73 ? ~_GEN_41 & ram_14_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_73) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_41) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_14_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_14_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_14_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_14_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_74) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_42) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_15_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_15_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_15_cfi_mispredicted <= _GEN_74 ? ~_GEN_42 & ram_15_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_74) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_42) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_15_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_15_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_15_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_15_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_75) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_43) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_16_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_16_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_16_cfi_mispredicted <= _GEN_75 ? ~_GEN_43 & ram_16_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_75) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_43) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_16_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_16_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_16_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_16_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_76) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_44) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_17_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_17_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_17_cfi_mispredicted <= _GEN_76 ? ~_GEN_44 & ram_17_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_76) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_44) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_17_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_17_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_17_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_17_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_77) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_45) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_18_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_18_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_18_cfi_mispredicted <= _GEN_77 ? ~_GEN_45 & ram_18_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_77) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_45) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_18_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_18_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_18_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_18_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_78) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_46) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_19_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_19_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_19_cfi_mispredicted <= _GEN_78 ? ~_GEN_46 & ram_19_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_78) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_46) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_19_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_19_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_19_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_19_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_79) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_47) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_20_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_20_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_20_cfi_mispredicted <= _GEN_79 ? ~_GEN_47 & ram_20_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_79) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_47) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_20_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_20_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_20_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_20_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_80) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_48) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_21_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_21_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_21_cfi_mispredicted <= _GEN_80 ? ~_GEN_48 & ram_21_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_80) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_48) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_21_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_21_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_21_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_21_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_81) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_49) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_22_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_22_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_22_cfi_mispredicted <= _GEN_81 ? ~_GEN_49 & ram_22_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_81) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_49) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_22_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_22_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_22_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_22_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_82) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_50) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_23_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_23_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_23_cfi_mispredicted <= _GEN_82 ? ~_GEN_50 & ram_23_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_82) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_50) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_23_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_23_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_23_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_23_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_83) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_51) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_24_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_24_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_24_cfi_mispredicted <= _GEN_83 ? ~_GEN_51 & ram_24_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_83) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_51) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_24_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_24_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_24_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_24_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_84) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_52) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_25_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_25_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_25_cfi_mispredicted <= _GEN_84 ? ~_GEN_52 & ram_25_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_84) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_52) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_25_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_25_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_25_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_25_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_85) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_53) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_26_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_26_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_26_cfi_mispredicted <= _GEN_85 ? ~_GEN_53 & ram_26_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_85) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_53) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_26_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_26_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_26_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_26_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_86) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_54) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_27_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_27_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_27_cfi_mispredicted <= _GEN_86 ? ~_GEN_54 & ram_27_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_86) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_54) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_27_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_27_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_27_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_27_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_87) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_55) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_28_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_28_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_28_cfi_mispredicted <= _GEN_87 ? ~_GEN_55 & ram_28_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_87) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_55) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_28_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_28_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_28_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_28_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_88) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_56) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_29_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_29_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_29_cfi_mispredicted <= _GEN_88 ? ~_GEN_56 & ram_29_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_88) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_56) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_29_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_29_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_29_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_29_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_89) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_57) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_30_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_30_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_30_cfi_mispredicted <= _GEN_89 ? ~_GEN_57 & ram_30_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_89) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_57) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_30_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_30_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_30_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_30_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
if (_GEN_90) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_58) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_31_cfi_idx_valid <= new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_cfi_idx_bits <= new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_cfi_taken <= new_entry_cfi_taken; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_31_cfi_idx_valid <= ram_REG_cfi_idx_valid; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_cfi_idx_bits <= ram_REG_cfi_idx_bits; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_cfi_taken <= ram_REG_cfi_taken; // @[fetch-target-queue.scala:130:21, :324:46]
end
ram_31_cfi_mispredicted <= _GEN_90 ? ~_GEN_58 & ram_31_cfi_mispredicted : ram_REG_cfi_mispredicted; // @[fetch-target-queue.scala:128:21, :130:21, :145:17, :147:28, :182:18, :301:28, :319:44, :324:46]
if (_GEN_90) begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
if (_GEN_58) begin // @[fetch-target-queue.scala:128:21, :145:17, :147:28]
ram_31_cfi_type <= new_entry_cfi_type; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_br_mask <= new_entry_br_mask; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_cfi_is_call <= new_entry_cfi_is_call; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_cfi_is_ret <= new_entry_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_cfi_npc_plus4 <= new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_ras_top <= new_entry_ras_top; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_ras_idx <= new_entry_ras_idx; // @[fetch-target-queue.scala:130:21, :149:25]
ram_31_start_bank <= new_entry_start_bank; // @[fetch-target-queue.scala:130:21, :149:25]
end
end
else begin // @[fetch-target-queue.scala:145:17, :301:28, :319:44]
ram_31_cfi_type <= ram_REG_cfi_type; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_br_mask <= ram_REG_br_mask; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_cfi_is_call <= ram_REG_cfi_is_call; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_cfi_is_ret <= ram_REG_cfi_is_ret; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_cfi_npc_plus4 <= ram_REG_cfi_npc_plus4; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_ras_top <= ram_REG_ras_top; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_ras_idx <= ram_REG_ras_idx; // @[fetch-target-queue.scala:130:21, :324:46]
ram_31_start_bank <= ram_REG_start_bank; // @[fetch-target-queue.scala:130:21, :324:46]
end
io_ras_update_REG <= ras_update; // @[fetch-target-queue.scala:206:28, :209:31]
io_ras_update_pc_REG <= ras_update_pc; // @[fetch-target-queue.scala:207:31, :210:31]
io_ras_update_idx_REG <= ras_update_idx; // @[fetch-target-queue.scala:208:32, :211:31]
if (io_redirect_valid_0) begin // @[fetch-target-queue.scala:82:7]
end
else if (REG) // @[fetch-target-queue.scala:235:23]
bpd_repair_idx <= bpd_repair_idx_REG; // @[fetch-target-queue.scala:215:27, :237:37]
else if (bpd_update_mispredict) // @[fetch-target-queue.scala:213:38]
bpd_repair_idx <= _bpd_repair_idx_T_2; // @[util.scala:211:20]
else if (_T) // @[fetch-target-queue.scala:243:34]
bpd_repair_idx <= _bpd_repair_idx_T_5; // @[util.scala:211:20]
else if (bpd_update_repair) // @[fetch-target-queue.scala:214:34]
bpd_repair_idx <= _bpd_repair_idx_T_8; // @[util.scala:211:20]
if (io_redirect_valid_0 | ~REG) begin // @[fetch-target-queue.scala:82:7, :216:24, :232:28, :235:{23,52}]
end
else // @[fetch-target-queue.scala:216:24, :232:28, :235:52]
bpd_end_idx <= bpd_end_idx_REG; // @[fetch-target-queue.scala:216:24, :238:37]
if (io_redirect_valid_0 | REG | bpd_update_mispredict | ~_T) begin // @[fetch-target-queue.scala:82:7, :213:38, :217:26, :232:28, :235:{23,52}, :239:39, :243:{34,69}]
end
else // @[fetch-target-queue.scala:217:26, :232:28, :235:52, :239:39, :243:69]
bpd_repair_pc <= bpd_pc; // @[fetch-target-queue.scala:217:26, :229:26]
bpd_entry_cfi_idx_valid <= _GEN_7[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_idx_bits <= _GEN_8[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_taken <= _GEN_9[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_mispredicted <= _GEN_10[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_type <= _GEN_11[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_br_mask <= _GEN_12[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_is_call <= _GEN_13[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_is_ret <= _GEN_14[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_cfi_npc_plus4 <= _GEN_15[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_ras_top <= _GEN_16[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_ras_idx <= _GEN_17[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_entry_start_bank <= _GEN_18[bpd_idx]; // @[fetch-target-queue.scala:219:20, :221:26]
bpd_pc <= _GEN_19[bpd_idx]; // @[fetch-target-queue.scala:219:20, :229:26]
bpd_target <= _GEN_19[_bpd_target_T_2]; // @[util.scala:211:20]
REG <= io_brupdate_b2_mispredict_0; // @[fetch-target-queue.scala:82:7, :235:23]
bpd_repair_idx_REG <= io_brupdate_b2_uop_ftq_idx_0; // @[fetch-target-queue.scala:82:7, :237:37]
bpd_end_idx_REG <= enq_ptr; // @[fetch-target-queue.scala:122:27, :238:37]
REG_1 <= bpd_update_mispredict; // @[fetch-target-queue.scala:213:38, :243:44]
do_commit_update_REG <= io_redirect_valid_0; // @[fetch-target-queue.scala:82:7, :261:61]
REG_2 <= do_commit_update | bpd_update_repair | bpd_update_mispredict; // @[fetch-target-queue.scala:213:38, :214:34, :261:50, :265:{16,34,54}]
io_bpdupdate_valid_REG <= bpd_update_repair; // @[fetch-target-queue.scala:214:34, :271:37]
io_bpdupdate_bits_is_mispredict_update_REG <= bpd_update_mispredict; // @[fetch-target-queue.scala:213:38, :272:54]
io_bpdupdate_bits_is_repair_update_REG <= bpd_update_repair; // @[fetch-target-queue.scala:214:34, :273:54]
io_enq_ready_REG <= _io_enq_ready_T_1; // @[fetch-target-queue.scala:295:{26,33}]
REG_3 <= io_redirect_valid_0; // @[fetch-target-queue.scala:82:7, :319:23]
prev_entry_REG_cfi_idx_valid <= redirect_new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_idx_bits <= redirect_new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_taken <= redirect_new_entry_cfi_taken; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_mispredicted <= redirect_new_entry_cfi_mispredicted; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_type <= redirect_new_entry_cfi_type; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_br_mask <= redirect_new_entry_br_mask; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_is_call <= redirect_new_entry_cfi_is_call; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_is_ret <= redirect_new_entry_cfi_is_ret; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_cfi_npc_plus4 <= redirect_new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_ras_top <= redirect_new_entry_ras_top; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_ras_idx <= redirect_new_entry_ras_idx; // @[fetch-target-queue.scala:299:36, :320:26]
prev_entry_REG_start_bank <= redirect_new_entry_start_bank; // @[fetch-target-queue.scala:299:36, :320:26]
REG_4 <= io_redirect_bits_0; // @[fetch-target-queue.scala:82:7, :324:16]
ram_REG_cfi_idx_valid <= redirect_new_entry_cfi_idx_valid; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_idx_bits <= redirect_new_entry_cfi_idx_bits; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_taken <= redirect_new_entry_cfi_taken; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_mispredicted <= redirect_new_entry_cfi_mispredicted; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_type <= redirect_new_entry_cfi_type; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_br_mask <= redirect_new_entry_br_mask; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_is_call <= redirect_new_entry_cfi_is_call; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_is_ret <= redirect_new_entry_cfi_is_ret; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_cfi_npc_plus4 <= redirect_new_entry_cfi_npc_plus4; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_ras_top <= redirect_new_entry_ras_top; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_ras_idx <= redirect_new_entry_ras_idx; // @[fetch-target-queue.scala:299:36, :324:46]
ram_REG_start_bank <= redirect_new_entry_start_bank; // @[fetch-target-queue.scala:299:36, :324:46]
io_rrd_ftq_resps_0_entry_REG_cfi_idx_valid <= _GEN_7[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_idx_bits <= _GEN_8[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_taken <= _GEN_9[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_mispredicted <= _GEN_10[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_type <= _GEN_11[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_br_mask <= _GEN_12[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_is_call <= _GEN_13[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_is_ret <= _GEN_14[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_cfi_npc_plus4 <= _GEN_15[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_ras_top <= _GEN_16[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_ras_idx <= _GEN_17[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_entry_REG_start_bank <= _GEN_18[idx]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_0_pc_REG <= _io_rrd_ftq_resps_0_pc_T; // @[fetch-target-queue.scala:342:{45,49}]
io_rrd_ftq_resps_0_valid_REG <= _io_rrd_ftq_resps_0_valid_T_1; // @[fetch-target-queue.scala:343:{45,62}]
io_rrd_ftq_resps_1_entry_REG_cfi_idx_valid <= _GEN_7[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_idx_bits <= _GEN_8[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_taken <= _GEN_9[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_mispredicted <= _GEN_10[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_type <= _GEN_11[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_br_mask <= _GEN_12[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_is_call <= _GEN_13[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_is_ret <= _GEN_14[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_cfi_npc_plus4 <= _GEN_15[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_ras_top <= _GEN_16[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_ras_idx <= _GEN_17[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_entry_REG_start_bank <= _GEN_18[idx_1]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_1_pc_REG <= _io_rrd_ftq_resps_1_pc_T; // @[fetch-target-queue.scala:342:{45,49}]
io_rrd_ftq_resps_1_valid_REG <= _io_rrd_ftq_resps_1_valid_T_1; // @[fetch-target-queue.scala:343:{45,62}]
io_rrd_ftq_resps_2_entry_REG_cfi_idx_valid <= _GEN_7[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_idx_bits <= _GEN_8[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_taken <= _GEN_9[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_mispredicted <= _GEN_10[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_type <= _GEN_11[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_br_mask <= _GEN_12[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_is_call <= _GEN_13[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_is_ret <= _GEN_14[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_cfi_npc_plus4 <= _GEN_15[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_ras_top <= _GEN_16[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_ras_idx <= _GEN_17[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_entry_REG_start_bank <= _GEN_18[idx_2]; // @[fetch-target-queue.scala:221:26, :332:18, :336:45]
io_rrd_ftq_resps_2_pc_REG <= _io_rrd_ftq_resps_2_pc_T; // @[fetch-target-queue.scala:342:{45,49}]
io_rrd_ftq_resps_2_valid_REG <= _io_rrd_ftq_resps_2_valid_T_1; // @[fetch-target-queue.scala:343:{45,62}]
io_com_pc_REG <= _GEN_19[_io_com_pc_T]; // @[fetch-target-queue.scala:229:26, :346:{23,31}]
io_debug_fetch_pc_0_REG <= pcs_0; // @[fetch-target-queue.scala:128:21, :349:36]
io_debug_fetch_pc_1_REG <= pcs_0; // @[fetch-target-queue.scala:128:21, :349:36]
io_debug_fetch_pc_2_REG <= pcs_0; // @[fetch-target-queue.scala:128:21, :349:36]
  end // always @(posedge)
meta meta ( // @[fetch-target-queue.scala:129:29]
.R0_addr (_bpd_meta_WIRE), // @[fetch-target-queue.scala:228:28]
.R0_clk (clock),
.R0_data (_meta_R0_data),
.W0_addr (enq_ptr), // @[fetch-target-queue.scala:122:27]
.W0_en (do_enq), // @[Decoupled.scala:51:35]
.W0_clk (clock),
.W0_data ({io_enq_bits_bpd_meta_1_0, io_enq_bits_bpd_meta_0_0}) // @[fetch-target-queue.scala:82:7, :129:29]
); // @[fetch-target-queue.scala:129:29]
ghist_0 ghist_0 ( // @[fetch-target-queue.scala:131:43]
.R0_addr (_bpd_ghist_WIRE), // @[fetch-target-queue.scala:222:32]
.R0_clk (clock),
.R0_data (_ghist_0_R0_data),
.W0_addr (enq_ptr), // @[fetch-target-queue.scala:122:27]
.W0_en (do_enq), // @[Decoupled.scala:51:35]
.W0_clk (clock),
.W0_data (_GEN_0) // @[fetch-target-queue.scala:131:43]
); // @[fetch-target-queue.scala:131:43]
ghist_1 ghist_1 ( // @[fetch-target-queue.scala:131:43]
.R0_addr (_io_rrd_ftq_resps_0_ghist_WIRE), // @[fetch-target-queue.scala:338:51]
.R0_clk (clock),
.R0_data (_ghist_1_R0_data),
.W0_addr (enq_ptr), // @[fetch-target-queue.scala:122:27]
.W0_en (do_enq), // @[Decoupled.scala:51:35]
.W0_clk (clock),
.W0_data (_GEN_0) // @[fetch-target-queue.scala:131:43]
); // @[fetch-target-queue.scala:131:43]
assign io_enq_ready = io_enq_ready_0; // @[fetch-target-queue.scala:82:7]
assign io_enq_idx = io_enq_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_valid = io_rrd_ftq_resps_0_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_idx_valid = io_rrd_ftq_resps_0_entry_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_idx_bits = io_rrd_ftq_resps_0_entry_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_taken = io_rrd_ftq_resps_0_entry_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_mispredicted = io_rrd_ftq_resps_0_entry_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_type = io_rrd_ftq_resps_0_entry_cfi_type_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_br_mask = io_rrd_ftq_resps_0_entry_br_mask_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_is_call = io_rrd_ftq_resps_0_entry_cfi_is_call_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_is_ret = io_rrd_ftq_resps_0_entry_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_cfi_npc_plus4 = io_rrd_ftq_resps_0_entry_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_ras_top = io_rrd_ftq_resps_0_entry_ras_top_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_ras_idx = io_rrd_ftq_resps_0_entry_ras_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_entry_start_bank = io_rrd_ftq_resps_0_entry_start_bank_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_ghist_old_history = io_rrd_ftq_resps_0_ghist_old_history_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_ghist_current_saw_branch_not_taken = io_rrd_ftq_resps_0_ghist_current_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_ghist_new_saw_branch_not_taken = io_rrd_ftq_resps_0_ghist_new_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_ghist_new_saw_branch_taken = io_rrd_ftq_resps_0_ghist_new_saw_branch_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_ghist_ras_idx = io_rrd_ftq_resps_0_ghist_ras_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_0_pc = io_rrd_ftq_resps_0_pc_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_valid = io_rrd_ftq_resps_1_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_idx_valid = io_rrd_ftq_resps_1_entry_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_idx_bits = io_rrd_ftq_resps_1_entry_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_taken = io_rrd_ftq_resps_1_entry_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_mispredicted = io_rrd_ftq_resps_1_entry_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_type = io_rrd_ftq_resps_1_entry_cfi_type_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_br_mask = io_rrd_ftq_resps_1_entry_br_mask_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_is_call = io_rrd_ftq_resps_1_entry_cfi_is_call_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_is_ret = io_rrd_ftq_resps_1_entry_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_cfi_npc_plus4 = io_rrd_ftq_resps_1_entry_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_ras_top = io_rrd_ftq_resps_1_entry_ras_top_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_ras_idx = io_rrd_ftq_resps_1_entry_ras_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_entry_start_bank = io_rrd_ftq_resps_1_entry_start_bank_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_1_pc = io_rrd_ftq_resps_1_pc_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_valid = io_rrd_ftq_resps_2_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_idx_valid = io_rrd_ftq_resps_2_entry_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_idx_bits = io_rrd_ftq_resps_2_entry_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_taken = io_rrd_ftq_resps_2_entry_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_mispredicted = io_rrd_ftq_resps_2_entry_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_type = io_rrd_ftq_resps_2_entry_cfi_type_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_br_mask = io_rrd_ftq_resps_2_entry_br_mask_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_is_call = io_rrd_ftq_resps_2_entry_cfi_is_call_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_is_ret = io_rrd_ftq_resps_2_entry_cfi_is_ret_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_cfi_npc_plus4 = io_rrd_ftq_resps_2_entry_cfi_npc_plus4_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_ras_top = io_rrd_ftq_resps_2_entry_ras_top_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_ras_idx = io_rrd_ftq_resps_2_entry_ras_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_entry_start_bank = io_rrd_ftq_resps_2_entry_start_bank_0; // @[fetch-target-queue.scala:82:7]
assign io_rrd_ftq_resps_2_pc = io_rrd_ftq_resps_2_pc_0; // @[fetch-target-queue.scala:82:7]
assign io_com_pc = io_com_pc_0; // @[fetch-target-queue.scala:82:7]
assign io_debug_fetch_pc_0 = io_debug_fetch_pc_0_0; // @[fetch-target-queue.scala:82:7]
assign io_debug_fetch_pc_1 = io_debug_fetch_pc_1_0; // @[fetch-target-queue.scala:82:7]
assign io_debug_fetch_pc_2 = io_debug_fetch_pc_2_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_valid = io_bpdupdate_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_is_mispredict_update = io_bpdupdate_bits_is_mispredict_update_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_is_repair_update = io_bpdupdate_bits_is_repair_update_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_pc = io_bpdupdate_bits_pc_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_br_mask = io_bpdupdate_bits_br_mask_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_cfi_idx_valid = io_bpdupdate_bits_cfi_idx_valid_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_cfi_idx_bits = io_bpdupdate_bits_cfi_idx_bits_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_cfi_taken = io_bpdupdate_bits_cfi_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_cfi_mispredicted = io_bpdupdate_bits_cfi_mispredicted_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_cfi_is_br = io_bpdupdate_bits_cfi_is_br_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_cfi_is_jal = io_bpdupdate_bits_cfi_is_jal_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_ghist_old_history = io_bpdupdate_bits_ghist_old_history_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_ghist_current_saw_branch_not_taken = io_bpdupdate_bits_ghist_current_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_ghist_new_saw_branch_not_taken = io_bpdupdate_bits_ghist_new_saw_branch_not_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_ghist_new_saw_branch_taken = io_bpdupdate_bits_ghist_new_saw_branch_taken_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_ghist_ras_idx = io_bpdupdate_bits_ghist_ras_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_target = io_bpdupdate_bits_target_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_meta_0 = io_bpdupdate_bits_meta_0_0; // @[fetch-target-queue.scala:82:7]
assign io_bpdupdate_bits_meta_1 = io_bpdupdate_bits_meta_1_0; // @[fetch-target-queue.scala:82:7]
assign io_ras_update = io_ras_update_0; // @[fetch-target-queue.scala:82:7]
assign io_ras_update_idx = io_ras_update_idx_0; // @[fetch-target-queue.scala:82:7]
assign io_ras_update_pc = io_ras_update_pc_0; // @[fetch-target-queue.scala:82:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
  * shift registers into specific modules to allow
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
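
// Hedged usage sketch (not part of the original file): the module and signal names
// below are illustrative assumptions only. ShiftRegInit builds a plain chain of
// RegNext stages with a reset value and suggested names, while AsyncResetShiftReg
// wraps the same idea in AsyncResetRegVec modules so backend CDC flows can recognize
// and constrain the chain.
class ShiftRegInitUsageSketch extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
    val qAsync = Output(Bool())
  })
  // Three RegNext stages, each reset to false.B, with suggested names sync_0 .. sync_2
  io.q := ShiftRegInit(io.d, n = 3, init = false.B, name = Some("sync"))
  // Module-wrapped equivalent, built from asynchronously reset register vectors
  io.qAsync := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("cdc_pipe"))
}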
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
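
// Illustrative parameter choices (a sketch; the vals below are examples only and are
// not used elsewhere in this file). 'safe' adds the reset-handshake logic described
// above; 'narrow' moves the read mux to the source side, trading a combinational
// sink->source->sink path for fewer level shifters at a voltage crossing.
object AsyncQueueParamsExamples {
  val default = AsyncQueueParams() // depth 8, 3 sync stages, safe, full-width mem port
  val shallow = AsyncQueueParams(depth = 4, sync = 2) // smaller queue, shorter synchronizer
  val lowShift = AsyncQueueParams(narrow = true) // read mux on the source side of the crossing
  val single = AsyncQueueParams.singleton() // depth-1 crossing, narrow never needed
}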
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
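
// Worked example (a sketch, not part of the original file): binaryToGray mirrors the
// 'incremented ^ (incremented >> 1)' line above in plain Scala. Adjacent gray codes,
// including the wrap-around, differ in exactly one bit, which is why the widx/ridx
// pointers can be synchronized bit-by-bit without ever being observed torn.
object GrayCounterSketch {
  def binaryToGray(b: Int): Int = b ^ (b >> 1) // e.g. binaryToGray(5) == 7 (101 -> 111)
  def adjacentDifferByOneBit(bits: Int): Boolean =
    (0 until (1 << bits)).forall { i =>
      val g0 = binaryToGray(i)
      val g1 = binaryToGray((i + 1) % (1 << bits))
      Integer.bitCount(g0 ^ g1) == 1 // exactly one bit flips per increment
    }
}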
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    // Impossible to write because a dequeue can occur on the receiving side,
    // reset can then happen, and the write side cannot know that the dequeue
    // occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have a different sync depth than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
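
// Hedged usage sketch (not part of the original file): an AsyncQueue crossing a
// Decoupled[UInt(8.W)] between two externally supplied clock domains. The port names
// and parameter values below are assumptions for the example; per-domain helpers
// ToAsyncBundle / FromAsyncBundle above are the alternative when each side lives in
// its own module.
class AsyncQueueUsageSketch extends Module {
  val io = IO(new Bundle {
    val enq_clock = Input(Clock())
    val enq_reset = Input(Bool())
    val deq_clock = Input(Clock())
    val deq_reset = Input(Bool())
    val enq = Flipped(Decoupled(UInt(8.W)))
    val deq = Decoupled(UInt(8.W))
  })
  val crossing = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 4, sync = 3)))
  // Each side of the crossing is clocked and reset from its own domain
  crossing.io.enq_clock := io.enq_clock
  crossing.io.enq_reset := io.enq_reset
  crossing.io.deq_clock := io.deq_clock
  crossing.io.deq_reset := io.deq_reset
  crossing.io.enq <> io.enq
  io.deq <> crossing.io.deq
}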
| module AsyncValidSync_66( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_76 io_out_sink_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_14( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_3, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_4, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_5, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_6, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_7, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [4:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _GEN; // @[Decoupled.scala:51:35]
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30]
wire [4:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30]
wire [1:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30]
wire [4:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30]
wire [1:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 5'hC; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 5'hA; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 5'h2; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 5'h8; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_4 = io_in_bits_egress_id == 5'h4; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_5 = io_in_bits_egress_id == 5'hE; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_6 = io_in_bits_egress_id == 5'h10; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_7 = io_in_bits_egress_id == 5'h6; // @[IngressUnit.scala:30:72]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_16 = (_route_buffer_io_enq_bits_flow_egress_node_id_T ? 4'hB : 4'h0) | {_route_buffer_io_enq_bits_flow_egress_node_id_T_1, 3'h0}; // @[Mux.scala:30:73]
wire [2:0] _GEN_0 = {_route_buffer_io_enq_bits_flow_egress_node_T_16[2:1], _route_buffer_io_enq_bits_flow_egress_node_T_16[0] | _route_buffer_io_enq_bits_flow_egress_node_id_T_2} | {3{_route_buffer_io_enq_bits_flow_egress_node_id_T_3}}; // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_21 = {_route_buffer_io_enq_bits_flow_egress_node_T_16[3], _GEN_0[2], _GEN_0[1:0] | {_route_buffer_io_enq_bits_flow_egress_node_id_T_4, 1'h0}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_5 ? 4'hD : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_6 ? 4'hE : 4'h0); // @[Mux.scala:30:73]
wire [2:0] _GEN_1 = _route_buffer_io_enq_bits_flow_egress_node_T_21[2:0] | {_route_buffer_io_enq_bits_flow_egress_node_id_T_7, 2'h0}; // @[Mux.scala:30:73]
wire [3:0] _GEN_2 = {_route_buffer_io_enq_bits_flow_egress_node_T_21[3], _GEN_1}; // @[Mux.scala:30:73]
assign _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _GEN_2 == 4'h5; // @[Decoupled.scala:51:35]
wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _GEN_2 != 4'h5; // @[Decoupled.scala:51:35]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
  wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
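// Illustrative usage sketch (added, not from the original file; the wrapper
// module and the "sync" naming below are assumptions): ShiftRegInit unrolls
// into a chain of RegNext stages, each reset to `init`, with the stage nearest
// the output receiving suffix _0.
object ShiftRegInitExample {
  class TwoStageSync extends Module {
    val io = IO(new Bundle {
      val d = Input(Bool())
      val q = Output(Bool())
    })
    // Equivalent to: sync_1 := RegNext(io.d, false.B); sync_0 := RegNext(sync_1, false.B)
    io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync"))
  }
}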
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
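// Worked example (added, not from the original file): the `incremented ^
// (incremented >> 1)` expression above is a binary-to-Gray conversion, so
// consecutive counter values differ in exactly one bit when sampled across the
// clock crossing.
object GrayCounterExample {
  // Pure-Scala model of the same mapping, for illustration only.
  def binToGray(b: Int): Int = b ^ (b >>> 1)
  // binToGray(3) == 2 (b010) and binToGray(4) == 6 (b110): even across the
  // binary carry from 3 to 4, the Gray codes differ in a single bit.
}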
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    // Impossible to write because a dequeue can occur on the receiving side, and
    // reset is then allowed to happen, but the write side cannot know that the
    // dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
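// Hedged note (added, not from the original file): the `ready` test above,
// widx =/= (ridx ^ (depth | depth >> 1).U), is the usual Gray-pointer full
// check. For depth = 8 the pointers are 4 bits wide and (8 | 4) is b1100, so
// the queue reports full exactly when the write pointer equals the read
// pointer with its two most-significant Gray bits inverted.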
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
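// Hedged usage sketch (added; producer/consumer names and clocks below are
// assumptions): a typical safe crossing wires both clock/reset pairs and then
// treats io.enq and io.deq as ordinary Decoupled interfaces in their domains.
//
//   val crossing = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 8, sync = 3)))
//   crossing.io.enq_clock := producerClock
//   crossing.io.enq_reset := producerReset
//   crossing.io.deq_clock := consumerClock
//   crossing.io.deq_reset := consumerReset
//   crossing.io.enq <> producer   // source-domain Decoupled
//   consumer <> crossing.io.deq   // sink-domain Decoupled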
| module AsyncValidSync_52( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_69 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
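// Hedged illustration (added, not from the original file): MacUnit simply
// forwards to the Arithmetic typeclass, so for SInt operands io.out_d is
// io.in_a * io.in_b + io.in_c. The widths below mirror an 8-bit-input /
// 20-bit-accumulate configuration and are only an assumption.
//
//   val mac = Module(new MacUnit(SInt(8.W), SInt(20.W), SInt(20.W)))
//   mac.io.in_a := a
//   mac.io.in_b := b
//   mac.io.in_c := partialSum
//   nextPartialSum := mac.io.out_d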
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
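// Hedged summary of the two dataflows above (added, not from the original
// file): in output-stationary mode c1/c2 form a double buffer, where one
// register accumulates a*b while the other shifts a finished (rounded and
// clipped) result out through out_c and reloads from in_d. In weight-stationary
// mode the held register supplies the weight to the MAC, out_b carries
// b + a*weight down the column, and in_d preloads the next weight.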
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
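// Added note (not from the original file): with this definition the bias
// matches IEEE-754 conventions, e.g. Float(8, 24) has bias (1 << 7) - 1 = 127
// (single precision) and Float(5, 11) has bias 15 (half precision).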
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, with ties going to even
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
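      // Worked example (added, not from the original file) for the rounding
      // shift above, with self = 11.U (b1011) and u = 2.U:
      //   point_five = bit 1 = 1, zeros = ((self & 1) =/= 0) = 1, ones_digit = bit 2 = 0
      //   r = 1, so the result is (11 >> 2) + 1 = 3, i.e. 2.75 rounds to 3.
      // In the tie case self = 2.U (b0010), u = 2.U: point_five = 1 but zeros
      // and ones_digit are both 0, so r = 0 and 0.5 rounds down to the even value 0.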
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
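      // Added note (not from the original file): for a 20-bit target this clips
      // to the range [-(1 << 19), (1 << 19) - 1], i.e. the 32'sh7FFFF and
      // -32'sh80000 saturation constants that appear in the generated PE
      // Verilog further down.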
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_432( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
end // always @(posedge)
MacUnit_176 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
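The tail of the PE datapath above is the shift-round-saturate step: point_five picks the bit just below the cut, zeros ORs the remaining shifted-out bits, ones_digit is the LSB of the truncated quotient, the rounding increment r = point_five & (zeros | ones_digit) implements round-half-to-even, and the sum is then clipped to the signed 20-bit range [-0x80000, 0x7FFFF]. The following Chisel sketch re-derives that datapath in isolation; it is an illustrative stand-in (module and signal names are mine, not the gemmini Arithmetic.scala source), assuming the 32-bit accumulator, 5-bit shift amount, and 20-bit output seen in the Verilog.
import chisel3._

class RoundingShiftClip(inW: Int = 32, outW: Int = 20) extends Module {
  val io = IO(new Bundle {
    val in    = Input(SInt(inW.W))
    val shift = Input(UInt(5.W))
    val out   = Output(SInt(outW.W))
  })

  val shifted   = io.in >> io.shift                                               // truncating arithmetic shift
  val pointFive = Mux(io.shift === 0.U, false.B, (io.in >> (io.shift - 1.U))(0))  // bit just below the cut
  val lowMask   = ((1.U << io.shift) - 1.U) >> 1                                  // bits strictly below pointFive
  val zeros     = (io.in.asUInt & lowMask).orR
  val onesDigit = shifted(0)
  val roundUp   = pointFive && (zeros || onesDigit)                               // round-half-to-even decision
  val rounded   = shifted +& Mux(roundUp, 1.S, 0.S)

  val maxVal  = ((BigInt(1) << (outW - 1)) - 1).S                                 // 0x7FFFF for outW = 20
  val minVal  = (-(BigInt(1) << (outW - 1))).S                                    // -0x80000 for outW = 20
  val clipped = Mux(rounded > maxVal, maxVal, Mux(rounded < minVal, minVal, rounded))
  io.out := clipped(outW - 1, 0).asSInt
}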
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of pass-through PEs.
* a, b, d, control, id, last, and valid are broadcast across the entire array and are passed through to the Tile's outputs
* @param inputType The data type of each PE's 'a' input
* @param outputType The data type of each PE's 'b' and 'd' inputs and of its outputs
* @param accType The data type of each PE's internal accumulator
* @param rows Number of PE rows in the Tile
* @param columns Number of PE columns in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_51( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0 // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
PE_307 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule |
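The Tile.scala broadcast loops above all rely on the same foldLeft idiom: the accumulator is the signal produced by the previous PE in the row or column, each step wires it into the next PE and returns that PE's output, and for a 1x1 tile the chain degenerates to the single pass-through PE seen in the Tile_51 Verilog. A minimal self-contained sketch of the idiom (PassThrough and Chain are illustrative names, not gemmini modules):
import chisel3._

class PassThrough(w: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  io.out := io.in // stand-in for a PE's combinational pass-through
}

class Chain(w: Int, n: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  val pes = Seq.fill(n)(Module(new PassThrough(w)))
  // Same shape as Tile.scala's `tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => ... }`
  io.out := pes.foldLeft(io.in) { case (prev, pe) =>
    pe.io.in := prev
    pe.io.out
  }
}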
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
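For orientation, this is how the two phases described above are typically used from client code: diplomatic nodes are declared when the LazyModule is constructed, and the LazyModuleImp body, evaluated lazily in the second phase, reads the negotiated node.in / node.out bundles (AXI4UserYanker further down in this document follows exactly this pattern). The sketch below is illustrative only; IdentityAdapter is a hypothetical adapter, and it assumes the rocket-chip AXI4AdapterNode with its default identity parameter functions.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.amba.axi4.AXI4AdapterNode

class IdentityAdapter(implicit p: Parameters) extends LazyModule {
  // Phase 1: declare the diplomatic node; parameters pass through unchanged.
  val node = AXI4AdapterNode()

  // Phase 2: the imp sees one (bundle, edge) pair per negotiated edge.
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out).foreach { case ((in, _), (out, _)) =>
      out.aw :<>= in.aw
      out.w  :<>= in.w
      in.b   :<>= out.b
      out.ar :<>= in.ar
      in.r   :<>= out.r
    }
  }
}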
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]],
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward bindings and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
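One practical consumer of the MonitorsEnabled field defined near the top of MixedNode.scala is configuration code that turns protocol monitors off, since instantiate() only calls inner.monitor when p(MonitorsEnabled) is true. A small sketch, assuming the usual cde Config idiom (WithNoMonitors is an illustrative name, not a class defined in these files):
import org.chipsalliance.cde.config.{Config, Parameters}
import org.chipsalliance.diplomacy.nodes.MonitorsEnabled

// Mixing this into a Config causes every MixedNode.instantiate() call that sees
// these Parameters to skip attaching a monitor to its input ports.
class WithNoMonitors extends Config((site, here, up) => {
  case MonitorsEnabled => false
})

object WithNoMonitorsExample {
  val p: Parameters = new WithNoMonitors
}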
File UserYanker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{Queue, QueueIO, UIntToOH}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.util.BundleMap
/** This adapter prunes all user bit fields of the echo type from request messages,
* storing them in queues and echoing them back when matching response messages are received.
*
* It also optionally rate limits the number of transactions that can be in flight simultaneously
* per FIFO domain / A[W|R]ID.
*
* @param capMaxFlight is an optional maximum number of transactions that can be in flight per A[W|R]ID.
*/
class AXI4UserYanker(capMaxFlight: Option[Int] = None)(implicit p: Parameters) extends LazyModule
{
val node = AXI4AdapterNode(
masterFn = { mp => mp.copy(
masters = mp.masters.map { m => m.copy(
maxFlight = (m.maxFlight, capMaxFlight) match {
case (Some(x), Some(y)) => Some(x min y)
case (Some(x), None) => Some(x)
case (None, Some(y)) => Some(y)
case (None, None) => None })},
echoFields = Nil)},
slaveFn = { sp => sp })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// Which fields are we stripping?
val echoFields = edgeIn.master.echoFields
val need_bypass = edgeOut.slave.minLatency < 1
edgeOut.master.masters.foreach { m =>
require (m.maxFlight.isDefined, "UserYanker needs a flight cap on each ID")
}
def queue(id: Int) = {
val depth = edgeOut.master.masters.find(_.id.contains(id)).flatMap(_.maxFlight).getOrElse(0)
if (depth == 0) {
Wire(new QueueIO(BundleMap(echoFields), 1)) // unused ID => undefined value
} else {
Module(new Queue(BundleMap(echoFields), depth, flow=need_bypass)).io
}
}
val rqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val wqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val arid = in.ar.bits.id
val ar_ready = VecInit(rqueues.map(_.enq.ready))(arid)
in .ar.ready := out.ar.ready && ar_ready
out.ar.valid := in .ar.valid && ar_ready
Connectable.waiveUnmatched(out.ar.bits, in.ar.bits) match {
case (lhs, rhs) => lhs :<= rhs
}
val rid = out.r.bits.id
val r_valid = VecInit(rqueues.map(_.deq.valid))(rid)
val r_bits = VecInit(rqueues.map(_.deq.bits))(rid)
assert (!out.r.valid || r_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.r.bits.echo :<= r_bits
val arsel = UIntToOH(arid, edgeIn.master.endId).asBools
val rsel = UIntToOH(rid, edgeIn.master.endId).asBools
(rqueues zip (arsel zip rsel)) foreach { case (q, (ar, r)) =>
q.deq.ready := out.r .valid && in .r .ready && r && out.r.bits.last
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .ar.valid && out.ar.ready && ar
q.enq.ready := DontCare
q.enq.bits :<>= in.ar.bits.echo
q.count := DontCare
}
val awid = in.aw.bits.id
val aw_ready = VecInit(wqueues.map(_.enq.ready))(awid)
in .aw.ready := out.aw.ready && aw_ready
out.aw.valid := in .aw.valid && aw_ready
Connectable.waiveUnmatched(out.aw.bits, in.aw.bits) match {
case (lhs, rhs) => lhs :<>= rhs
}
val bid = out.b.bits.id
val b_valid = VecInit(wqueues.map(_.deq.valid))(bid)
val b_bits = VecInit(wqueues.map(_.deq.bits))(bid)
assert (!out.b.valid || b_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.b.bits.echo :<>= b_bits
val awsel = UIntToOH(awid, edgeIn.master.endId).asBools
val bsel = UIntToOH(bid, edgeIn.master.endId).asBools
(wqueues zip (awsel zip bsel)) foreach { case (q, (aw, b)) =>
q.deq.ready := out.b .valid && in .b .ready && b
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .aw.valid && out.aw.ready && aw
q.enq.ready := DontCare
q.enq.bits :<>= in.aw.bits.echo
q.count := DontCare
}
out.w :<>= in.w
}
}
}
object AXI4UserYanker
{
def apply(capMaxFlight: Option[Int] = None)(implicit p: Parameters): AXI4Node =
{
val axi4yank = LazyModule(new AXI4UserYanker(capMaxFlight))
axi4yank.node
}
}
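A hedged usage sketch showing where AXI4UserYanker typically sits in a diplomatic graph: in front of a slave that cannot carry the echo user fields, with a per-ID flight cap so the internal echo queues stay bounded. Everything here other than AXI4UserYanker itself (the example master/slave parameters, names, and address map) is a placeholder chosen for illustration, not taken from the files above.
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.amba.axi4._
import freechips.rocketchip.diplomacy.{AddressSet, IdRange, TransferSizes}

class YankerExample(implicit p: Parameters) extends LazyModule {
  // Placeholder endpoints so the graph can elaborate; a real design supplies its own.
  val master = AXI4MasterNode(Seq(AXI4MasterPortParameters(
    masters = Seq(AXI4MasterParameters(name = "dma", id = IdRange(0, 16), maxFlight = Some(4))))))
  val slave = AXI4SlaveNode(Seq(AXI4SlavePortParameters(
    slaves = Seq(AXI4SlaveParameters(
      address       = Seq(AddressSet(0x0, 0xffff)),
      supportsRead  = TransferSizes(1, 64),
      supportsWrite = TransferSizes(1, 64))),
    beatBytes = 8)))

  // The yanker strips/echoes the echo user fields and enforces the per-ID flight cap.
  slave := AXI4UserYanker(capMaxFlight = Some(4)) := master

  lazy val module = new LazyModuleImp(this) {
    // A real design would drive master.out and consume slave.in here.
  }
}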
| module AXI4UserYanker( // @[UserYanker.scala:36:9]
input clock, // @[UserYanker.scala:36:9]
input reset, // @[UserYanker.scala:36:9]
output auto_in_aw_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_aw_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_id, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_aw_bits_len, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_aw_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
input auto_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_aw_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
output auto_in_w_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_w_valid, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_w_bits_data, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_w_bits_strb, // @[LazyModuleImp.scala:107:25]
input auto_in_w_bits_last, // @[LazyModuleImp.scala:107:25]
input auto_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_b_bits_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_b_bits_resp, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_b_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_b_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
output auto_in_ar_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_ar_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_id, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_ar_bits_len, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_ar_bits_size, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
input auto_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_ar_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
input auto_in_r_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_r_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_r_bits_id, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_r_bits_data, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_r_bits_resp, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_r_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_r_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25]
output auto_in_r_bits_last, // @[LazyModuleImp.scala:107:25]
input auto_out_aw_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_aw_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_aw_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_aw_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input auto_out_w_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_w_valid, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_w_bits_data, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_w_bits_strb, // @[LazyModuleImp.scala:107:25]
output auto_out_w_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_out_ar_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_ar_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_ar_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_ar_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
output auto_out_r_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_r_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_r_bits_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_r_bits_data, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_r_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_out_r_bits_last // @[LazyModuleImp.scala:107:25]
);
wire auto_in_aw_valid_0 = auto_in_aw_valid; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_aw_bits_id_0 = auto_in_aw_bits_id; // @[UserYanker.scala:36:9]
wire [31:0] auto_in_aw_bits_addr_0 = auto_in_aw_bits_addr; // @[UserYanker.scala:36:9]
wire [7:0] auto_in_aw_bits_len_0 = auto_in_aw_bits_len; // @[UserYanker.scala:36:9]
wire [2:0] auto_in_aw_bits_size_0 = auto_in_aw_bits_size; // @[UserYanker.scala:36:9]
wire [1:0] auto_in_aw_bits_burst_0 = auto_in_aw_bits_burst; // @[UserYanker.scala:36:9]
wire auto_in_aw_bits_lock_0 = auto_in_aw_bits_lock; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_aw_bits_cache_0 = auto_in_aw_bits_cache; // @[UserYanker.scala:36:9]
wire [2:0] auto_in_aw_bits_prot_0 = auto_in_aw_bits_prot; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_aw_bits_qos_0 = auto_in_aw_bits_qos; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_aw_bits_echo_tl_state_size_0 = auto_in_aw_bits_echo_tl_state_size; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_aw_bits_echo_tl_state_source_0 = auto_in_aw_bits_echo_tl_state_source; // @[UserYanker.scala:36:9]
wire auto_in_w_valid_0 = auto_in_w_valid; // @[UserYanker.scala:36:9]
wire [63:0] auto_in_w_bits_data_0 = auto_in_w_bits_data; // @[UserYanker.scala:36:9]
wire [7:0] auto_in_w_bits_strb_0 = auto_in_w_bits_strb; // @[UserYanker.scala:36:9]
wire auto_in_w_bits_last_0 = auto_in_w_bits_last; // @[UserYanker.scala:36:9]
wire auto_in_b_ready_0 = auto_in_b_ready; // @[UserYanker.scala:36:9]
wire auto_in_ar_valid_0 = auto_in_ar_valid; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_ar_bits_id_0 = auto_in_ar_bits_id; // @[UserYanker.scala:36:9]
wire [31:0] auto_in_ar_bits_addr_0 = auto_in_ar_bits_addr; // @[UserYanker.scala:36:9]
wire [7:0] auto_in_ar_bits_len_0 = auto_in_ar_bits_len; // @[UserYanker.scala:36:9]
wire [2:0] auto_in_ar_bits_size_0 = auto_in_ar_bits_size; // @[UserYanker.scala:36:9]
wire [1:0] auto_in_ar_bits_burst_0 = auto_in_ar_bits_burst; // @[UserYanker.scala:36:9]
wire auto_in_ar_bits_lock_0 = auto_in_ar_bits_lock; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_ar_bits_cache_0 = auto_in_ar_bits_cache; // @[UserYanker.scala:36:9]
wire [2:0] auto_in_ar_bits_prot_0 = auto_in_ar_bits_prot; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_ar_bits_qos_0 = auto_in_ar_bits_qos; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_ar_bits_echo_tl_state_size_0 = auto_in_ar_bits_echo_tl_state_size; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_ar_bits_echo_tl_state_source_0 = auto_in_ar_bits_echo_tl_state_source; // @[UserYanker.scala:36:9]
wire auto_in_r_ready_0 = auto_in_r_ready; // @[UserYanker.scala:36:9]
wire auto_out_aw_ready_0 = auto_out_aw_ready; // @[UserYanker.scala:36:9]
wire auto_out_w_ready_0 = auto_out_w_ready; // @[UserYanker.scala:36:9]
wire auto_out_b_valid_0 = auto_out_b_valid; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_b_bits_id_0 = auto_out_b_bits_id; // @[UserYanker.scala:36:9]
wire [1:0] auto_out_b_bits_resp_0 = auto_out_b_bits_resp; // @[UserYanker.scala:36:9]
wire auto_out_ar_ready_0 = auto_out_ar_ready; // @[UserYanker.scala:36:9]
wire auto_out_r_valid_0 = auto_out_r_valid; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_r_bits_id_0 = auto_out_r_bits_id; // @[UserYanker.scala:36:9]
wire [63:0] auto_out_r_bits_data_0 = auto_out_r_bits_data; // @[UserYanker.scala:36:9]
wire [1:0] auto_out_r_bits_resp_0 = auto_out_r_bits_resp; // @[UserYanker.scala:36:9]
wire auto_out_r_bits_last_0 = auto_out_r_bits_last; // @[UserYanker.scala:36:9]
wire [3:0] rqueues_10_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_10_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_11_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_11_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_12_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_12_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_13_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_13_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_14_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_14_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_15_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_15_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_10_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_10_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_11_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_11_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_12_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_12_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_13_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_13_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_14_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_14_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_15_deq_bits_tl_state_size = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_15_deq_bits_tl_state_source = 4'h0; // @[UserYanker.scala:49:15]
wire [3:0] _r_bits_WIRE_10_tl_state_size = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_10_tl_state_source = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_11_tl_state_size = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_11_tl_state_source = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_12_tl_state_size = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_12_tl_state_source = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_13_tl_state_size = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_13_tl_state_source = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_14_tl_state_size = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_14_tl_state_source = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_15_tl_state_size = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_15_tl_state_source = 4'h0; // @[UserYanker.scala:68:27]
wire [3:0] _b_bits_WIRE_10_tl_state_size = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_10_tl_state_source = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_11_tl_state_size = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_11_tl_state_source = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_12_tl_state_size = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_12_tl_state_source = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_13_tl_state_size = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_13_tl_state_source = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_14_tl_state_size = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_14_tl_state_source = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_15_tl_state_size = 4'h0; // @[UserYanker.scala:97:27]
wire [3:0] _b_bits_WIRE_15_tl_state_source = 4'h0; // @[UserYanker.scala:97:27]
wire rqueues_10_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_10_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_10_count = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_11_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_11_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_11_count = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_12_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_12_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_12_count = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_13_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_13_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_13_count = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_14_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_14_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_14_count = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_15_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_15_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire rqueues_15_count = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_10_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_10_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_10_count = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_11_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_11_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_11_count = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_12_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_12_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_12_count = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_13_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_13_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_13_count = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_14_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_14_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_14_count = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_15_enq_ready = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_15_deq_valid = 1'h0; // @[UserYanker.scala:49:15]
wire wqueues_15_count = 1'h0; // @[UserYanker.scala:49:15]
wire _ar_ready_WIRE_10 = 1'h0; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_11 = 1'h0; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_12 = 1'h0; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_13 = 1'h0; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_14 = 1'h0; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_15 = 1'h0; // @[UserYanker.scala:59:29]
wire _r_valid_WIRE_10 = 1'h0; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_11 = 1'h0; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_12 = 1'h0; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_13 = 1'h0; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_14 = 1'h0; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_15 = 1'h0; // @[UserYanker.scala:67:28]
wire _aw_ready_WIRE_10 = 1'h0; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_11 = 1'h0; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_12 = 1'h0; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_13 = 1'h0; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_14 = 1'h0; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_15 = 1'h0; // @[UserYanker.scala:88:29]
wire _b_valid_WIRE_10 = 1'h0; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_11 = 1'h0; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_12 = 1'h0; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_13 = 1'h0; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_14 = 1'h0; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_15 = 1'h0; // @[UserYanker.scala:96:28]
wire nodeIn_aw_ready; // @[MixedNode.scala:551:17]
wire nodeIn_aw_valid = auto_in_aw_valid_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_aw_bits_id = auto_in_aw_bits_id_0; // @[UserYanker.scala:36:9]
wire [31:0] nodeIn_aw_bits_addr = auto_in_aw_bits_addr_0; // @[UserYanker.scala:36:9]
wire [7:0] nodeIn_aw_bits_len = auto_in_aw_bits_len_0; // @[UserYanker.scala:36:9]
wire [2:0] nodeIn_aw_bits_size = auto_in_aw_bits_size_0; // @[UserYanker.scala:36:9]
wire [1:0] nodeIn_aw_bits_burst = auto_in_aw_bits_burst_0; // @[UserYanker.scala:36:9]
wire nodeIn_aw_bits_lock = auto_in_aw_bits_lock_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_aw_bits_cache = auto_in_aw_bits_cache_0; // @[UserYanker.scala:36:9]
wire [2:0] nodeIn_aw_bits_prot = auto_in_aw_bits_prot_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_aw_bits_qos = auto_in_aw_bits_qos_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_aw_bits_echo_tl_state_size = auto_in_aw_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_aw_bits_echo_tl_state_source = auto_in_aw_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9]
wire nodeIn_w_ready; // @[MixedNode.scala:551:17]
wire nodeIn_w_valid = auto_in_w_valid_0; // @[UserYanker.scala:36:9]
wire [63:0] nodeIn_w_bits_data = auto_in_w_bits_data_0; // @[UserYanker.scala:36:9]
wire [7:0] nodeIn_w_bits_strb = auto_in_w_bits_strb_0; // @[UserYanker.scala:36:9]
wire nodeIn_w_bits_last = auto_in_w_bits_last_0; // @[UserYanker.scala:36:9]
wire nodeIn_b_ready = auto_in_b_ready_0; // @[UserYanker.scala:36:9]
wire nodeIn_b_valid; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_b_bits_id; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_b_bits_resp; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_b_bits_echo_tl_state_size; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_b_bits_echo_tl_state_source; // @[MixedNode.scala:551:17]
wire nodeIn_ar_ready; // @[MixedNode.scala:551:17]
wire nodeIn_ar_valid = auto_in_ar_valid_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_ar_bits_id = auto_in_ar_bits_id_0; // @[UserYanker.scala:36:9]
wire [31:0] nodeIn_ar_bits_addr = auto_in_ar_bits_addr_0; // @[UserYanker.scala:36:9]
wire [7:0] nodeIn_ar_bits_len = auto_in_ar_bits_len_0; // @[UserYanker.scala:36:9]
wire [2:0] nodeIn_ar_bits_size = auto_in_ar_bits_size_0; // @[UserYanker.scala:36:9]
wire [1:0] nodeIn_ar_bits_burst = auto_in_ar_bits_burst_0; // @[UserYanker.scala:36:9]
wire nodeIn_ar_bits_lock = auto_in_ar_bits_lock_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_ar_bits_cache = auto_in_ar_bits_cache_0; // @[UserYanker.scala:36:9]
wire [2:0] nodeIn_ar_bits_prot = auto_in_ar_bits_prot_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_ar_bits_qos = auto_in_ar_bits_qos_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_ar_bits_echo_tl_state_size = auto_in_ar_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeIn_ar_bits_echo_tl_state_source = auto_in_ar_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9]
wire nodeIn_r_ready = auto_in_r_ready_0; // @[UserYanker.scala:36:9]
wire nodeIn_r_valid; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_r_bits_id; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_r_bits_data; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_r_bits_resp; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_r_bits_echo_tl_state_size; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_r_bits_echo_tl_state_source; // @[MixedNode.scala:551:17]
wire nodeIn_r_bits_last; // @[MixedNode.scala:551:17]
wire nodeOut_aw_ready = auto_out_aw_ready_0; // @[UserYanker.scala:36:9]
wire nodeOut_aw_valid; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_aw_bits_id; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_aw_bits_addr; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_aw_bits_len; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_aw_bits_size; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_aw_bits_burst; // @[MixedNode.scala:542:17]
wire nodeOut_aw_bits_lock; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_aw_bits_cache; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_aw_bits_prot; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_aw_bits_qos; // @[MixedNode.scala:542:17]
wire nodeOut_w_ready = auto_out_w_ready_0; // @[UserYanker.scala:36:9]
wire nodeOut_w_valid; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_w_bits_data; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_w_bits_strb; // @[MixedNode.scala:542:17]
wire nodeOut_w_bits_last; // @[MixedNode.scala:542:17]
wire nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire nodeOut_b_valid = auto_out_b_valid_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeOut_b_bits_id = auto_out_b_bits_id_0; // @[UserYanker.scala:36:9]
wire [1:0] nodeOut_b_bits_resp = auto_out_b_bits_resp_0; // @[UserYanker.scala:36:9]
wire nodeOut_ar_ready = auto_out_ar_ready_0; // @[UserYanker.scala:36:9]
wire nodeOut_ar_valid; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_ar_bits_id; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_ar_bits_addr; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_ar_bits_len; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_ar_bits_size; // @[MixedNode.scala:542:17]
wire [1:0] nodeOut_ar_bits_burst; // @[MixedNode.scala:542:17]
wire nodeOut_ar_bits_lock; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_ar_bits_cache; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_ar_bits_prot; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_ar_bits_qos; // @[MixedNode.scala:542:17]
wire nodeOut_r_ready; // @[MixedNode.scala:542:17]
wire nodeOut_r_valid = auto_out_r_valid_0; // @[UserYanker.scala:36:9]
wire [3:0] nodeOut_r_bits_id = auto_out_r_bits_id_0; // @[UserYanker.scala:36:9]
wire [63:0] nodeOut_r_bits_data = auto_out_r_bits_data_0; // @[UserYanker.scala:36:9]
wire [1:0] nodeOut_r_bits_resp = auto_out_r_bits_resp_0; // @[UserYanker.scala:36:9]
wire nodeOut_r_bits_last = auto_out_r_bits_last_0; // @[UserYanker.scala:36:9]
wire auto_in_aw_ready_0; // @[UserYanker.scala:36:9]
wire auto_in_w_ready_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_b_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_b_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_b_bits_id_0; // @[UserYanker.scala:36:9]
wire [1:0] auto_in_b_bits_resp_0; // @[UserYanker.scala:36:9]
wire auto_in_b_valid_0; // @[UserYanker.scala:36:9]
wire auto_in_ar_ready_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_r_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_r_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_in_r_bits_id_0; // @[UserYanker.scala:36:9]
wire [63:0] auto_in_r_bits_data_0; // @[UserYanker.scala:36:9]
wire [1:0] auto_in_r_bits_resp_0; // @[UserYanker.scala:36:9]
wire auto_in_r_bits_last_0; // @[UserYanker.scala:36:9]
wire auto_in_r_valid_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_aw_bits_id_0; // @[UserYanker.scala:36:9]
wire [31:0] auto_out_aw_bits_addr_0; // @[UserYanker.scala:36:9]
wire [7:0] auto_out_aw_bits_len_0; // @[UserYanker.scala:36:9]
wire [2:0] auto_out_aw_bits_size_0; // @[UserYanker.scala:36:9]
wire [1:0] auto_out_aw_bits_burst_0; // @[UserYanker.scala:36:9]
wire auto_out_aw_bits_lock_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_aw_bits_cache_0; // @[UserYanker.scala:36:9]
wire [2:0] auto_out_aw_bits_prot_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_aw_bits_qos_0; // @[UserYanker.scala:36:9]
wire auto_out_aw_valid_0; // @[UserYanker.scala:36:9]
wire [63:0] auto_out_w_bits_data_0; // @[UserYanker.scala:36:9]
wire [7:0] auto_out_w_bits_strb_0; // @[UserYanker.scala:36:9]
wire auto_out_w_bits_last_0; // @[UserYanker.scala:36:9]
wire auto_out_w_valid_0; // @[UserYanker.scala:36:9]
wire auto_out_b_ready_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_ar_bits_id_0; // @[UserYanker.scala:36:9]
wire [31:0] auto_out_ar_bits_addr_0; // @[UserYanker.scala:36:9]
wire [7:0] auto_out_ar_bits_len_0; // @[UserYanker.scala:36:9]
wire [2:0] auto_out_ar_bits_size_0; // @[UserYanker.scala:36:9]
wire [1:0] auto_out_ar_bits_burst_0; // @[UserYanker.scala:36:9]
wire auto_out_ar_bits_lock_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_ar_bits_cache_0; // @[UserYanker.scala:36:9]
wire [2:0] auto_out_ar_bits_prot_0; // @[UserYanker.scala:36:9]
wire [3:0] auto_out_ar_bits_qos_0; // @[UserYanker.scala:36:9]
wire auto_out_ar_valid_0; // @[UserYanker.scala:36:9]
wire auto_out_r_ready_0; // @[UserYanker.scala:36:9]
wire _nodeIn_aw_ready_T; // @[UserYanker.scala:89:36]
assign auto_in_aw_ready_0 = nodeIn_aw_ready; // @[UserYanker.scala:36:9]
assign nodeOut_aw_bits_id = nodeIn_aw_bits_id; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] awsel_shiftAmount = nodeIn_aw_bits_id; // @[OneHot.scala:64:49]
assign nodeOut_aw_bits_addr = nodeIn_aw_bits_addr; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_len = nodeIn_aw_bits_len; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_size = nodeIn_aw_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_burst = nodeIn_aw_bits_burst; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_lock = nodeIn_aw_bits_lock; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_cache = nodeIn_aw_bits_cache; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_prot = nodeIn_aw_bits_prot; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_aw_bits_qos = nodeIn_aw_bits_qos; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] wqueues_10_enq_bits_tl_state_size = nodeIn_aw_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_11_enq_bits_tl_state_size = nodeIn_aw_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_12_enq_bits_tl_state_size = nodeIn_aw_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_13_enq_bits_tl_state_size = nodeIn_aw_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_14_enq_bits_tl_state_size = nodeIn_aw_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_15_enq_bits_tl_state_size = nodeIn_aw_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_10_enq_bits_tl_state_source = nodeIn_aw_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_11_enq_bits_tl_state_source = nodeIn_aw_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_12_enq_bits_tl_state_source = nodeIn_aw_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_13_enq_bits_tl_state_source = nodeIn_aw_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_14_enq_bits_tl_state_source = nodeIn_aw_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] wqueues_15_enq_bits_tl_state_source = nodeIn_aw_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
assign auto_in_w_ready_0 = nodeIn_w_ready; // @[UserYanker.scala:36:9]
assign nodeOut_w_valid = nodeIn_w_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_w_bits_data = nodeIn_w_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_w_bits_strb = nodeIn_w_bits_strb; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_w_bits_last = nodeIn_w_bits_last; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_b_ready = nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_in_b_valid_0 = nodeIn_b_valid; // @[UserYanker.scala:36:9]
assign auto_in_b_bits_id_0 = nodeIn_b_bits_id; // @[UserYanker.scala:36:9]
assign auto_in_b_bits_resp_0 = nodeIn_b_bits_resp; // @[UserYanker.scala:36:9]
assign auto_in_b_bits_echo_tl_state_size_0 = nodeIn_b_bits_echo_tl_state_size; // @[UserYanker.scala:36:9]
assign auto_in_b_bits_echo_tl_state_source_0 = nodeIn_b_bits_echo_tl_state_source; // @[UserYanker.scala:36:9]
wire _nodeIn_ar_ready_T; // @[UserYanker.scala:60:36]
assign auto_in_ar_ready_0 = nodeIn_ar_ready; // @[UserYanker.scala:36:9]
assign nodeOut_ar_bits_id = nodeIn_ar_bits_id; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] arsel_shiftAmount = nodeIn_ar_bits_id; // @[OneHot.scala:64:49]
assign nodeOut_ar_bits_addr = nodeIn_ar_bits_addr; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_len = nodeIn_ar_bits_len; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_size = nodeIn_ar_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_burst = nodeIn_ar_bits_burst; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_lock = nodeIn_ar_bits_lock; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_cache = nodeIn_ar_bits_cache; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_prot = nodeIn_ar_bits_prot; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_ar_bits_qos = nodeIn_ar_bits_qos; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] rqueues_10_enq_bits_tl_state_size = nodeIn_ar_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_11_enq_bits_tl_state_size = nodeIn_ar_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_12_enq_bits_tl_state_size = nodeIn_ar_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_13_enq_bits_tl_state_size = nodeIn_ar_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_14_enq_bits_tl_state_size = nodeIn_ar_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_15_enq_bits_tl_state_size = nodeIn_ar_bits_echo_tl_state_size; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_10_enq_bits_tl_state_source = nodeIn_ar_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_11_enq_bits_tl_state_source = nodeIn_ar_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_12_enq_bits_tl_state_source = nodeIn_ar_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_13_enq_bits_tl_state_source = nodeIn_ar_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_14_enq_bits_tl_state_source = nodeIn_ar_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
wire [3:0] rqueues_15_enq_bits_tl_state_source = nodeIn_ar_bits_echo_tl_state_source; // @[UserYanker.scala:49:15]
assign nodeOut_r_ready = nodeIn_r_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_in_r_valid_0 = nodeIn_r_valid; // @[UserYanker.scala:36:9]
assign auto_in_r_bits_id_0 = nodeIn_r_bits_id; // @[UserYanker.scala:36:9]
assign auto_in_r_bits_data_0 = nodeIn_r_bits_data; // @[UserYanker.scala:36:9]
assign auto_in_r_bits_resp_0 = nodeIn_r_bits_resp; // @[UserYanker.scala:36:9]
assign auto_in_r_bits_echo_tl_state_size_0 = nodeIn_r_bits_echo_tl_state_size; // @[UserYanker.scala:36:9]
assign auto_in_r_bits_echo_tl_state_source_0 = nodeIn_r_bits_echo_tl_state_source; // @[UserYanker.scala:36:9]
assign auto_in_r_bits_last_0 = nodeIn_r_bits_last; // @[UserYanker.scala:36:9]
wire _nodeOut_aw_valid_T; // @[UserYanker.scala:90:36]
assign auto_out_aw_valid_0 = nodeOut_aw_valid; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_id_0 = nodeOut_aw_bits_id; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_addr_0 = nodeOut_aw_bits_addr; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_len_0 = nodeOut_aw_bits_len; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_size_0 = nodeOut_aw_bits_size; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_burst_0 = nodeOut_aw_bits_burst; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_lock_0 = nodeOut_aw_bits_lock; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_cache_0 = nodeOut_aw_bits_cache; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_prot_0 = nodeOut_aw_bits_prot; // @[UserYanker.scala:36:9]
assign auto_out_aw_bits_qos_0 = nodeOut_aw_bits_qos; // @[UserYanker.scala:36:9]
assign nodeIn_w_ready = nodeOut_w_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_w_valid_0 = nodeOut_w_valid; // @[UserYanker.scala:36:9]
assign auto_out_w_bits_data_0 = nodeOut_w_bits_data; // @[UserYanker.scala:36:9]
assign auto_out_w_bits_strb_0 = nodeOut_w_bits_strb; // @[UserYanker.scala:36:9]
assign auto_out_w_bits_last_0 = nodeOut_w_bits_last; // @[UserYanker.scala:36:9]
assign auto_out_b_ready_0 = nodeOut_b_ready; // @[UserYanker.scala:36:9]
assign nodeIn_b_valid = nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_b_bits_id = nodeOut_b_bits_id; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] bsel_shiftAmount = nodeOut_b_bits_id; // @[OneHot.scala:64:49]
assign nodeIn_b_bits_resp = nodeOut_b_bits_resp; // @[MixedNode.scala:542:17, :551:17]
wire _nodeOut_ar_valid_T; // @[UserYanker.scala:61:36]
assign auto_out_ar_valid_0 = nodeOut_ar_valid; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_id_0 = nodeOut_ar_bits_id; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_addr_0 = nodeOut_ar_bits_addr; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_len_0 = nodeOut_ar_bits_len; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_size_0 = nodeOut_ar_bits_size; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_burst_0 = nodeOut_ar_bits_burst; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_lock_0 = nodeOut_ar_bits_lock; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_cache_0 = nodeOut_ar_bits_cache; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_prot_0 = nodeOut_ar_bits_prot; // @[UserYanker.scala:36:9]
assign auto_out_ar_bits_qos_0 = nodeOut_ar_bits_qos; // @[UserYanker.scala:36:9]
assign auto_out_r_ready_0 = nodeOut_r_ready; // @[UserYanker.scala:36:9]
assign nodeIn_r_valid = nodeOut_r_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_r_bits_id = nodeOut_r_bits_id; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] rsel_shiftAmount = nodeOut_r_bits_id; // @[OneHot.scala:64:49]
assign nodeIn_r_bits_data = nodeOut_r_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_r_bits_resp = nodeOut_r_bits_resp; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_r_bits_last = nodeOut_r_bits_last; // @[MixedNode.scala:542:17, :551:17]
wire _rqueues_10_enq_valid_T_1; // @[UserYanker.scala:81:53]
wire _rqueues_10_deq_ready_T_2; // @[UserYanker.scala:78:58]
wire rqueues_10_enq_valid; // @[UserYanker.scala:49:15]
wire rqueues_10_deq_ready; // @[UserYanker.scala:49:15]
wire _rqueues_11_enq_valid_T_1; // @[UserYanker.scala:81:53]
wire _rqueues_11_deq_ready_T_2; // @[UserYanker.scala:78:58]
wire rqueues_11_enq_valid; // @[UserYanker.scala:49:15]
wire rqueues_11_deq_ready; // @[UserYanker.scala:49:15]
wire _rqueues_12_enq_valid_T_1; // @[UserYanker.scala:81:53]
wire _rqueues_12_deq_ready_T_2; // @[UserYanker.scala:78:58]
wire rqueues_12_enq_valid; // @[UserYanker.scala:49:15]
wire rqueues_12_deq_ready; // @[UserYanker.scala:49:15]
wire _rqueues_13_enq_valid_T_1; // @[UserYanker.scala:81:53]
wire _rqueues_13_deq_ready_T_2; // @[UserYanker.scala:78:58]
wire rqueues_13_enq_valid; // @[UserYanker.scala:49:15]
wire rqueues_13_deq_ready; // @[UserYanker.scala:49:15]
wire _rqueues_14_enq_valid_T_1; // @[UserYanker.scala:81:53]
wire _rqueues_14_deq_ready_T_2; // @[UserYanker.scala:78:58]
wire rqueues_14_enq_valid; // @[UserYanker.scala:49:15]
wire rqueues_14_deq_ready; // @[UserYanker.scala:49:15]
wire _rqueues_15_enq_valid_T_1; // @[UserYanker.scala:81:53]
wire _rqueues_15_deq_ready_T_2; // @[UserYanker.scala:78:58]
wire rqueues_15_enq_valid; // @[UserYanker.scala:49:15]
wire rqueues_15_deq_ready; // @[UserYanker.scala:49:15]
wire _wqueues_10_enq_valid_T_1; // @[UserYanker.scala:110:53]
wire _wqueues_10_deq_ready_T_1; // @[UserYanker.scala:107:53]
wire wqueues_10_enq_valid; // @[UserYanker.scala:49:15]
wire wqueues_10_deq_ready; // @[UserYanker.scala:49:15]
wire _wqueues_11_enq_valid_T_1; // @[UserYanker.scala:110:53]
wire _wqueues_11_deq_ready_T_1; // @[UserYanker.scala:107:53]
wire wqueues_11_enq_valid; // @[UserYanker.scala:49:15]
wire wqueues_11_deq_ready; // @[UserYanker.scala:49:15]
wire _wqueues_12_enq_valid_T_1; // @[UserYanker.scala:110:53]
wire _wqueues_12_deq_ready_T_1; // @[UserYanker.scala:107:53]
wire wqueues_12_enq_valid; // @[UserYanker.scala:49:15]
wire wqueues_12_deq_ready; // @[UserYanker.scala:49:15]
wire _wqueues_13_enq_valid_T_1; // @[UserYanker.scala:110:53]
wire _wqueues_13_deq_ready_T_1; // @[UserYanker.scala:107:53]
wire wqueues_13_enq_valid; // @[UserYanker.scala:49:15]
wire wqueues_13_deq_ready; // @[UserYanker.scala:49:15]
wire _wqueues_14_enq_valid_T_1; // @[UserYanker.scala:110:53]
wire _wqueues_14_deq_ready_T_1; // @[UserYanker.scala:107:53]
wire wqueues_14_enq_valid; // @[UserYanker.scala:49:15]
wire wqueues_14_deq_ready; // @[UserYanker.scala:49:15]
wire _wqueues_15_enq_valid_T_1; // @[UserYanker.scala:110:53]
wire _wqueues_15_deq_ready_T_1; // @[UserYanker.scala:107:53]
wire wqueues_15_enq_valid; // @[UserYanker.scala:49:15]
wire wqueues_15_deq_ready; // @[UserYanker.scala:49:15]
wire _ar_ready_WIRE_0; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_1; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_2; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_3; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_4; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_5; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_6; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_7; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_8; // @[UserYanker.scala:59:29]
wire _ar_ready_WIRE_9; // @[UserYanker.scala:59:29]
wire [15:0] _GEN = {{1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {_ar_ready_WIRE_9}, {_ar_ready_WIRE_8}, {_ar_ready_WIRE_7}, {_ar_ready_WIRE_6}, {_ar_ready_WIRE_5}, {_ar_ready_WIRE_4}, {_ar_ready_WIRE_3}, {_ar_ready_WIRE_2}, {_ar_ready_WIRE_1}, {_ar_ready_WIRE_0}}; // @[UserYanker.scala:59:29, :60:36]
assign _nodeIn_ar_ready_T = nodeOut_ar_ready & _GEN[nodeIn_ar_bits_id]; // @[UserYanker.scala:60:36]
assign nodeIn_ar_ready = _nodeIn_ar_ready_T; // @[UserYanker.scala:60:36]
assign _nodeOut_ar_valid_T = nodeIn_ar_valid & _GEN[nodeIn_ar_bits_id]; // @[UserYanker.scala:60:36, :61:36]
assign nodeOut_ar_valid = _nodeOut_ar_valid_T; // @[UserYanker.scala:61:36]
wire [3:0] _r_bits_WIRE_0_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_1_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_2_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_3_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_4_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_5_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_6_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_7_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_8_tl_state_size; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_9_tl_state_size; // @[UserYanker.scala:68:27]
wire [15:0][3:0] _GEN_0 = {{4'h0}, {4'h0}, {4'h0}, {4'h0}, {4'h0}, {4'h0}, {_r_bits_WIRE_9_tl_state_size}, {_r_bits_WIRE_8_tl_state_size}, {_r_bits_WIRE_7_tl_state_size}, {_r_bits_WIRE_6_tl_state_size}, {_r_bits_WIRE_5_tl_state_size}, {_r_bits_WIRE_4_tl_state_size}, {_r_bits_WIRE_3_tl_state_size}, {_r_bits_WIRE_2_tl_state_size}, {_r_bits_WIRE_1_tl_state_size}, {_r_bits_WIRE_0_tl_state_size}}; // @[UserYanker.scala:68:27, :73:22]
assign nodeIn_r_bits_echo_tl_state_size = _GEN_0[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22]
wire [3:0] _r_bits_WIRE_0_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_1_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_2_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_3_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_4_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_5_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_6_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_7_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_8_tl_state_source; // @[UserYanker.scala:68:27]
wire [3:0] _r_bits_WIRE_9_tl_state_source; // @[UserYanker.scala:68:27]
wire [15:0][3:0] _GEN_1 = {{4'h0}, {4'h0}, {4'h0}, {4'h0}, {4'h0}, {4'h0}, {_r_bits_WIRE_9_tl_state_source}, {_r_bits_WIRE_8_tl_state_source}, {_r_bits_WIRE_7_tl_state_source}, {_r_bits_WIRE_6_tl_state_source}, {_r_bits_WIRE_5_tl_state_source}, {_r_bits_WIRE_4_tl_state_source}, {_r_bits_WIRE_3_tl_state_source}, {_r_bits_WIRE_2_tl_state_source}, {_r_bits_WIRE_1_tl_state_source}, {_r_bits_WIRE_0_tl_state_source}}; // @[UserYanker.scala:68:27, :73:22]
assign nodeIn_r_bits_echo_tl_state_source = _GEN_1[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22]
wire [15:0] _arsel_T = 16'h1 << arsel_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [15:0] _arsel_T_1 = _arsel_T; // @[OneHot.scala:65:{12,27}]
wire arsel_0 = _arsel_T_1[0]; // @[OneHot.scala:65:27]
wire arsel_1 = _arsel_T_1[1]; // @[OneHot.scala:65:27]
wire arsel_2 = _arsel_T_1[2]; // @[OneHot.scala:65:27]
wire arsel_3 = _arsel_T_1[3]; // @[OneHot.scala:65:27]
wire arsel_4 = _arsel_T_1[4]; // @[OneHot.scala:65:27]
wire arsel_5 = _arsel_T_1[5]; // @[OneHot.scala:65:27]
wire arsel_6 = _arsel_T_1[6]; // @[OneHot.scala:65:27]
wire arsel_7 = _arsel_T_1[7]; // @[OneHot.scala:65:27]
wire arsel_8 = _arsel_T_1[8]; // @[OneHot.scala:65:27]
wire arsel_9 = _arsel_T_1[9]; // @[OneHot.scala:65:27]
wire arsel_10 = _arsel_T_1[10]; // @[OneHot.scala:65:27]
wire arsel_11 = _arsel_T_1[11]; // @[OneHot.scala:65:27]
wire arsel_12 = _arsel_T_1[12]; // @[OneHot.scala:65:27]
wire arsel_13 = _arsel_T_1[13]; // @[OneHot.scala:65:27]
wire arsel_14 = _arsel_T_1[14]; // @[OneHot.scala:65:27]
wire arsel_15 = _arsel_T_1[15]; // @[OneHot.scala:65:27]
wire [15:0] _rsel_T = 16'h1 << rsel_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [15:0] _rsel_T_1 = _rsel_T; // @[OneHot.scala:65:{12,27}]
wire rsel_0 = _rsel_T_1[0]; // @[OneHot.scala:65:27]
wire rsel_1 = _rsel_T_1[1]; // @[OneHot.scala:65:27]
wire rsel_2 = _rsel_T_1[2]; // @[OneHot.scala:65:27]
wire rsel_3 = _rsel_T_1[3]; // @[OneHot.scala:65:27]
wire rsel_4 = _rsel_T_1[4]; // @[OneHot.scala:65:27]
wire rsel_5 = _rsel_T_1[5]; // @[OneHot.scala:65:27]
wire rsel_6 = _rsel_T_1[6]; // @[OneHot.scala:65:27]
wire rsel_7 = _rsel_T_1[7]; // @[OneHot.scala:65:27]
wire rsel_8 = _rsel_T_1[8]; // @[OneHot.scala:65:27]
wire rsel_9 = _rsel_T_1[9]; // @[OneHot.scala:65:27]
wire rsel_10 = _rsel_T_1[10]; // @[OneHot.scala:65:27]
wire rsel_11 = _rsel_T_1[11]; // @[OneHot.scala:65:27]
wire rsel_12 = _rsel_T_1[12]; // @[OneHot.scala:65:27]
wire rsel_13 = _rsel_T_1[13]; // @[OneHot.scala:65:27]
wire rsel_14 = _rsel_T_1[14]; // @[OneHot.scala:65:27]
wire rsel_15 = _rsel_T_1[15]; // @[OneHot.scala:65:27]
wire _T_50 = nodeOut_r_valid & nodeIn_r_ready; // @[UserYanker.scala:78:37]
wire _rqueues_10_deq_ready_T; // @[UserYanker.scala:78:37]
assign _rqueues_10_deq_ready_T = _T_50; // @[UserYanker.scala:78:37]
wire _rqueues_11_deq_ready_T; // @[UserYanker.scala:78:37]
assign _rqueues_11_deq_ready_T = _T_50; // @[UserYanker.scala:78:37]
wire _rqueues_12_deq_ready_T; // @[UserYanker.scala:78:37]
assign _rqueues_12_deq_ready_T = _T_50; // @[UserYanker.scala:78:37]
wire _rqueues_13_deq_ready_T; // @[UserYanker.scala:78:37]
assign _rqueues_13_deq_ready_T = _T_50; // @[UserYanker.scala:78:37]
wire _rqueues_14_deq_ready_T; // @[UserYanker.scala:78:37]
assign _rqueues_14_deq_ready_T = _T_50; // @[UserYanker.scala:78:37]
wire _rqueues_15_deq_ready_T; // @[UserYanker.scala:78:37]
assign _rqueues_15_deq_ready_T = _T_50; // @[UserYanker.scala:78:37]
wire _T_53 = nodeIn_ar_valid & nodeOut_ar_ready; // @[UserYanker.scala:81:37]
wire _rqueues_10_enq_valid_T; // @[UserYanker.scala:81:37]
assign _rqueues_10_enq_valid_T = _T_53; // @[UserYanker.scala:81:37]
wire _rqueues_11_enq_valid_T; // @[UserYanker.scala:81:37]
assign _rqueues_11_enq_valid_T = _T_53; // @[UserYanker.scala:81:37]
wire _rqueues_12_enq_valid_T; // @[UserYanker.scala:81:37]
assign _rqueues_12_enq_valid_T = _T_53; // @[UserYanker.scala:81:37]
wire _rqueues_13_enq_valid_T; // @[UserYanker.scala:81:37]
assign _rqueues_13_enq_valid_T = _T_53; // @[UserYanker.scala:81:37]
wire _rqueues_14_enq_valid_T; // @[UserYanker.scala:81:37]
assign _rqueues_14_enq_valid_T = _T_53; // @[UserYanker.scala:81:37]
wire _rqueues_15_enq_valid_T; // @[UserYanker.scala:81:37]
assign _rqueues_15_enq_valid_T = _T_53; // @[UserYanker.scala:81:37]
wire _rqueues_10_deq_ready_T_1 = _rqueues_10_deq_ready_T & rsel_10; // @[UserYanker.scala:76:55, :78:{37,53}]
assign _rqueues_10_deq_ready_T_2 = _rqueues_10_deq_ready_T_1 & nodeOut_r_bits_last; // @[UserYanker.scala:78:{53,58}]
assign rqueues_10_deq_ready = _rqueues_10_deq_ready_T_2; // @[UserYanker.scala:49:15, :78:58]
assign _rqueues_10_enq_valid_T_1 = _rqueues_10_enq_valid_T & arsel_10; // @[UserYanker.scala:75:55, :81:{37,53}]
assign rqueues_10_enq_valid = _rqueues_10_enq_valid_T_1; // @[UserYanker.scala:49:15, :81:53]
wire _rqueues_11_deq_ready_T_1 = _rqueues_11_deq_ready_T & rsel_11; // @[UserYanker.scala:76:55, :78:{37,53}]
assign _rqueues_11_deq_ready_T_2 = _rqueues_11_deq_ready_T_1 & nodeOut_r_bits_last; // @[UserYanker.scala:78:{53,58}]
assign rqueues_11_deq_ready = _rqueues_11_deq_ready_T_2; // @[UserYanker.scala:49:15, :78:58]
assign _rqueues_11_enq_valid_T_1 = _rqueues_11_enq_valid_T & arsel_11; // @[UserYanker.scala:75:55, :81:{37,53}]
assign rqueues_11_enq_valid = _rqueues_11_enq_valid_T_1; // @[UserYanker.scala:49:15, :81:53]
wire _rqueues_12_deq_ready_T_1 = _rqueues_12_deq_ready_T & rsel_12; // @[UserYanker.scala:76:55, :78:{37,53}]
assign _rqueues_12_deq_ready_T_2 = _rqueues_12_deq_ready_T_1 & nodeOut_r_bits_last; // @[UserYanker.scala:78:{53,58}]
assign rqueues_12_deq_ready = _rqueues_12_deq_ready_T_2; // @[UserYanker.scala:49:15, :78:58]
assign _rqueues_12_enq_valid_T_1 = _rqueues_12_enq_valid_T & arsel_12; // @[UserYanker.scala:75:55, :81:{37,53}]
assign rqueues_12_enq_valid = _rqueues_12_enq_valid_T_1; // @[UserYanker.scala:49:15, :81:53]
wire _rqueues_13_deq_ready_T_1 = _rqueues_13_deq_ready_T & rsel_13; // @[UserYanker.scala:76:55, :78:{37,53}]
assign _rqueues_13_deq_ready_T_2 = _rqueues_13_deq_ready_T_1 & nodeOut_r_bits_last; // @[UserYanker.scala:78:{53,58}]
assign rqueues_13_deq_ready = _rqueues_13_deq_ready_T_2; // @[UserYanker.scala:49:15, :78:58]
assign _rqueues_13_enq_valid_T_1 = _rqueues_13_enq_valid_T & arsel_13; // @[UserYanker.scala:75:55, :81:{37,53}]
assign rqueues_13_enq_valid = _rqueues_13_enq_valid_T_1; // @[UserYanker.scala:49:15, :81:53]
wire _rqueues_14_deq_ready_T_1 = _rqueues_14_deq_ready_T & rsel_14; // @[UserYanker.scala:76:55, :78:{37,53}]
assign _rqueues_14_deq_ready_T_2 = _rqueues_14_deq_ready_T_1 & nodeOut_r_bits_last; // @[UserYanker.scala:78:{53,58}]
assign rqueues_14_deq_ready = _rqueues_14_deq_ready_T_2; // @[UserYanker.scala:49:15, :78:58]
assign _rqueues_14_enq_valid_T_1 = _rqueues_14_enq_valid_T & arsel_14; // @[UserYanker.scala:75:55, :81:{37,53}]
assign rqueues_14_enq_valid = _rqueues_14_enq_valid_T_1; // @[UserYanker.scala:49:15, :81:53]
wire _rqueues_15_deq_ready_T_1 = _rqueues_15_deq_ready_T & rsel_15; // @[UserYanker.scala:76:55, :78:{37,53}]
assign _rqueues_15_deq_ready_T_2 = _rqueues_15_deq_ready_T_1 & nodeOut_r_bits_last; // @[UserYanker.scala:78:{53,58}]
assign rqueues_15_deq_ready = _rqueues_15_deq_ready_T_2; // @[UserYanker.scala:49:15, :78:58]
assign _rqueues_15_enq_valid_T_1 = _rqueues_15_enq_valid_T & arsel_15; // @[UserYanker.scala:75:55, :81:{37,53}]
assign rqueues_15_enq_valid = _rqueues_15_enq_valid_T_1; // @[UserYanker.scala:49:15, :81:53]
wire _aw_ready_WIRE_0; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_1; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_2; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_3; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_4; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_5; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_6; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_7; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_8; // @[UserYanker.scala:88:29]
wire _aw_ready_WIRE_9; // @[UserYanker.scala:88:29]
wire [15:0] _GEN_2 = {{1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {_aw_ready_WIRE_9}, {_aw_ready_WIRE_8}, {_aw_ready_WIRE_7}, {_aw_ready_WIRE_6}, {_aw_ready_WIRE_5}, {_aw_ready_WIRE_4}, {_aw_ready_WIRE_3}, {_aw_ready_WIRE_2}, {_aw_ready_WIRE_1}, {_aw_ready_WIRE_0}}; // @[UserYanker.scala:88:29, :89:36]
assign _nodeIn_aw_ready_T = nodeOut_aw_ready & _GEN_2[nodeIn_aw_bits_id]; // @[UserYanker.scala:89:36]
assign nodeIn_aw_ready = _nodeIn_aw_ready_T; // @[UserYanker.scala:89:36]
assign _nodeOut_aw_valid_T = nodeIn_aw_valid & _GEN_2[nodeIn_aw_bits_id]; // @[UserYanker.scala:89:36, :90:36]
assign nodeOut_aw_valid = _nodeOut_aw_valid_T; // @[UserYanker.scala:90:36]
wire _r_valid_WIRE_0; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_1; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_2; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_3; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_4; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_5; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_6; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_7; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_8; // @[UserYanker.scala:67:28]
wire _r_valid_WIRE_9; // @[UserYanker.scala:67:28]
wire _b_valid_WIRE_0; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_1; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_2; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_3; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_4; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_5; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_6; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_7; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_8; // @[UserYanker.scala:96:28]
wire _b_valid_WIRE_9; // @[UserYanker.scala:96:28] |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
 * Object to XOR-fold an input register of fullLength bits into compressedLength bits.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
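// A minimal usage sketch (illustrative only): folding an 8-bit value down to 4 bits
// XORs the low and high nibbles together.
//   Fold("hA5".U(8.W), 4, 8)   // ==> "hF".U  (0x5 ^ 0xA)
//   Fold("h3C".U(8.W), 8, 8)   // ==> "h3C".U (no folding when fullLength <= compressedLength)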
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
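// A worked example of the IsKilledByBranch / GetNewBrMask helpers above (illustrative
// only): every in-flight branch owns one bit of br_mask.
//   uop.br_mask                 = "b1010".U  // speculated under branch tags 1 and 3
//   brupdate.b1.resolve_mask    = "b0010".U  // branch tag 1 resolved this cycle...
//   brupdate.b1.mispredict_mask = "b0000".U  // ...and was predicted correctly
//   GetNewBrMask(brupdate, uop)     ==> "b1000".U  (tag 1 is cleared from the mask)
//   IsKilledByBranch(brupdate, uop) ==> false.B    (no mispredict bit overlaps br_mask)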
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
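// Worked examples (illustrative only):
//   WrapAdd(6.U, 3.U, 8)   // ==> 1.U : 6 + 3 = 9 wraps to 9 mod 8 (power-of-2 fast path)
//   WrapAdd(6.U, 3.U, 7)   // ==> 2.U : 9 >= 7, so the sum wraps to 9 - 7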
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert trick for the scenario where pc is wider than the mask:
    //   a direct AND with ~((b-1).U) would clear all bits above size(b).
~(~pc | (b-1).U)
}
}
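// Worked example (illustrative only): aligning to a 64-byte boundary clears the low
// log2(64) = 6 address bits while preserving everything above them.
//   AlignPCToBoundary("h12345".U, 64)   // ==> "h12340".U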
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
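// Worked examples (illustrative only): the top bit of x is replicated into the new bits.
//   Sext("b1010".U(4.W), 8)   // ==> "b1111_1010".U
//   Sext("b0110".U(4.W), 8)   // ==> "b0000_0110".U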
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
 * Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
 * Object to return the position of the first set bit at or after the head (wrapping around if none is found).
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
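// Worked examples with four requesters (illustrative only):
//   AgePriorityEncoder(Seq(true.B, false.B, true.B, false.B), 2.U)   // ==> 2.U
//     (index 2 is the first set input at or after head)
//   AgePriorityEncoder(Seq(true.B, false.B, false.B, false.B), 2.U)  // ==> 0.U
//     (nothing is set at or after head, so the search wraps back around to index 0)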
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
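// Worked example (illustrative only): with head = 2 the age order of a 4-entry circular
// queue is 2, 3, 0, 1, so
//   IsOlder(3.U, 1.U, 2.U)   // ==> true.B  (entry 3 was allocated before entry 1)
//   IsOlder(1.U, 3.U, 2.U)   // ==> false.B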
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
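// Worked examples for MaskLower/MaskUpper (illustrative only), with a 4-bit input:
//   MaskLower("b0100".U(4.W))   // ==> "b0111".U
//   MaskUpper("b0100".U(4.W))   // ==> "b1100".U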
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
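// Shape sketch (illustrative only): a 2x3 Vec[Vec[T]] becomes 3x2, with
//   Transpose(in)(c)(r) === in(r)(c)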
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
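// Worked example (illustrative only): the n one-hot selects pick off the lowest set bits.
//   SelectFirstN("b01101".U, 2)   // ==> Vec("b00001".U, "b00100".U)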
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
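// A minimal instantiation sketch (illustrative only; the UInt payload is an arbitrary
// example type):
//   val compactor = Module(new Compactor(n = 4, k = 2, gen = UInt(8.W)))
//   // compactor.io.in(0..3)  : four Decoupled producers
//   // compactor.io.out(0..1) : the first two valid inputs, packed toward out(0)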
/**
* Create a queue that can be killed with a branch kill signal.
 * Assumption: enq.valid is only high if the entry is not already killed by a branch (so IsKilled is not checked on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
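// A minimal instantiation sketch (illustrative only; MyRespBundle is a hypothetical
// bundle that mixes in boom.v3.common.HasBoomUOP, and an implicit Parameters must be
// in scope):
//   val q = Module(new BranchKillableQueue(new MyRespBundle, entries = 4))
//   q.io.enq      <> producer        // enq.valid must already exclude branch-killed uops
//   q.io.brupdate := io.brupdate     // entries under a mispredicted branch are dropped
//   q.io.flush    := io.flush        // entries matching flush_fn are dropped on flush
//   consumer      <> q.io.deq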
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
   * Add prefix to BOOM strings (currently only prepends the tile id)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
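/**
 * Illustrative sketch (assumption, not part of the original file): elaboration-time
 * module summaries typically build on this helper, assuming an implicit Parameters
 * is in scope. The strings and parameter names below are hypothetical.
 */
object BoomCoreStringPrefixExample
{
  def apply(numEntries: Int, issueWidth: Int)(implicit p: Parameters): String =
    BoomCoreStringPrefix(
      "==Example Issue Unit==",
      s"Num Entries : $numEntries",
      s"Issue Width : $issueWidth")
}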
File consts.scala:
//******************************************************************************
// Copyright (c) 2011 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Constants
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common.constants
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.Str
import freechips.rocketchip.rocket.RVCExpander
/**
* Mixin for issue queue types
*/
trait IQType
{
val IQT_SZ = 3
val IQT_INT = 1.U(IQT_SZ.W)
val IQT_MEM = 2.U(IQT_SZ.W)
val IQT_FP = 4.U(IQT_SZ.W)
val IQT_MFP = 6.U(IQT_SZ.W)
}
/**
* Mixin for scalar operation constants
*/
trait ScalarOpConstants
{
val X = BitPat("b?")
val Y = BitPat("b1")
val N = BitPat("b0")
//************************************
// Extra Constants
// Which branch predictor predicted us
val BSRC_SZ = 2
val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred
val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred
val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred
val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution
//************************************
// Control Signals
// CFI types
val CFI_SZ = 3
val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction
val CFI_BR = 1.U(CFI_SZ.W) // Branch
val CFI_JAL = 2.U(CFI_SZ.W) // JAL
val CFI_JALR = 3.U(CFI_SZ.W) // JALR
// PC Select Signal
val PC_PLUS4 = 0.U(2.W) // PC + 4
val PC_BRJMP = 1.U(2.W) // brjmp_target
val PC_JALR = 2.U(2.W) // jump_reg_target
// Branch Type
val BR_N = 0.U(4.W) // Next
val BR_NE = 1.U(4.W) // Branch on NotEqual
val BR_EQ = 2.U(4.W) // Branch on Equal
val BR_GE = 3.U(4.W) // Branch on Greater/Equal
val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned
val BR_LT = 5.U(4.W) // Branch on Less Than
val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned
val BR_J = 7.U(4.W) // Jump
val BR_JR = 8.U(4.W) // Jump Register
// RS1 Operand Select Signal
val OP1_RS1 = 0.U(2.W) // Register Source #1
val OP1_ZERO= 1.U(2.W)
val OP1_PC = 2.U(2.W)
val OP1_X = BitPat("b??")
// RS2 Operand Select Signal
val OP2_RS2 = 0.U(3.W) // Register Source #2
val OP2_IMM = 1.U(3.W) // immediate
val OP2_ZERO= 2.U(3.W) // constant 0
val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4)
val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1
val OP2_X = BitPat("b???")
// Register File Write Enable Signal
val REN_0 = false.B
val REN_1 = true.B
  // Is 32b Word or 64b Doubleword?
val SZ_DW = 1
val DW_X = true.B // Bool(xLen==64)
val DW_32 = false.B
val DW_64 = true.B
val DW_XPR = true.B // Bool(xLen==64)
// Memory Enable Signal
val MEN_0 = false.B
val MEN_1 = true.B
val MEN_X = false.B
// Immediate Extend Select
val IS_I = 0.U(3.W) // I-Type (LD,ALU)
val IS_S = 1.U(3.W) // S-Type (ST)
val IS_B = 2.U(3.W) // SB-Type (BR)
val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC)
val IS_J = 4.U(3.W) // UJ-Type (J/JAL)
val IS_X = BitPat("b???")
// Decode Stage Control Signals
val RT_FIX = 0.U(2.W)
val RT_FLT = 1.U(2.W)
val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc)
val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.)
// TODO rename RT_NAR
// Micro-op opcodes
// TODO change micro-op opcodes into using enum
val UOPC_SZ = 7
val uopX = BitPat.dontCare(UOPC_SZ)
val uopNOP = 0.U(UOPC_SZ.W)
val uopLD = 1.U(UOPC_SZ.W)
val uopSTA = 2.U(UOPC_SZ.W) // store address generation
val uopSTD = 3.U(UOPC_SZ.W) // store data generation
val uopLUI = 4.U(UOPC_SZ.W)
val uopADDI = 5.U(UOPC_SZ.W)
val uopANDI = 6.U(UOPC_SZ.W)
val uopORI = 7.U(UOPC_SZ.W)
val uopXORI = 8.U(UOPC_SZ.W)
val uopSLTI = 9.U(UOPC_SZ.W)
val uopSLTIU= 10.U(UOPC_SZ.W)
val uopSLLI = 11.U(UOPC_SZ.W)
val uopSRAI = 12.U(UOPC_SZ.W)
val uopSRLI = 13.U(UOPC_SZ.W)
val uopSLL = 14.U(UOPC_SZ.W)
val uopADD = 15.U(UOPC_SZ.W)
val uopSUB = 16.U(UOPC_SZ.W)
val uopSLT = 17.U(UOPC_SZ.W)
val uopSLTU = 18.U(UOPC_SZ.W)
val uopAND = 19.U(UOPC_SZ.W)
val uopOR = 20.U(UOPC_SZ.W)
val uopXOR = 21.U(UOPC_SZ.W)
val uopSRA = 22.U(UOPC_SZ.W)
val uopSRL = 23.U(UOPC_SZ.W)
val uopBEQ = 24.U(UOPC_SZ.W)
val uopBNE = 25.U(UOPC_SZ.W)
val uopBGE = 26.U(UOPC_SZ.W)
val uopBGEU = 27.U(UOPC_SZ.W)
val uopBLT = 28.U(UOPC_SZ.W)
val uopBLTU = 29.U(UOPC_SZ.W)
val uopCSRRW= 30.U(UOPC_SZ.W)
val uopCSRRS= 31.U(UOPC_SZ.W)
val uopCSRRC= 32.U(UOPC_SZ.W)
val uopCSRRWI=33.U(UOPC_SZ.W)
val uopCSRRSI=34.U(UOPC_SZ.W)
val uopCSRRCI=35.U(UOPC_SZ.W)
val uopJ = 36.U(UOPC_SZ.W)
val uopJAL = 37.U(UOPC_SZ.W)
val uopJALR = 38.U(UOPC_SZ.W)
val uopAUIPC= 39.U(UOPC_SZ.W)
//val uopSRET = 40.U(UOPC_SZ.W)
val uopCFLSH= 41.U(UOPC_SZ.W)
val uopFENCE= 42.U(UOPC_SZ.W)
val uopADDIW= 43.U(UOPC_SZ.W)
val uopADDW = 44.U(UOPC_SZ.W)
val uopSUBW = 45.U(UOPC_SZ.W)
val uopSLLIW= 46.U(UOPC_SZ.W)
val uopSLLW = 47.U(UOPC_SZ.W)
val uopSRAIW= 48.U(UOPC_SZ.W)
val uopSRAW = 49.U(UOPC_SZ.W)
val uopSRLIW= 50.U(UOPC_SZ.W)
val uopSRLW = 51.U(UOPC_SZ.W)
val uopMUL = 52.U(UOPC_SZ.W)
val uopMULH = 53.U(UOPC_SZ.W)
val uopMULHU= 54.U(UOPC_SZ.W)
val uopMULHSU=55.U(UOPC_SZ.W)
val uopMULW = 56.U(UOPC_SZ.W)
val uopDIV = 57.U(UOPC_SZ.W)
val uopDIVU = 58.U(UOPC_SZ.W)
val uopREM = 59.U(UOPC_SZ.W)
val uopREMU = 60.U(UOPC_SZ.W)
val uopDIVW = 61.U(UOPC_SZ.W)
val uopDIVUW= 62.U(UOPC_SZ.W)
val uopREMW = 63.U(UOPC_SZ.W)
val uopREMUW= 64.U(UOPC_SZ.W)
val uopFENCEI = 65.U(UOPC_SZ.W)
// = 66.U(UOPC_SZ.W)
val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen)
val uopFMV_W_X = 68.U(UOPC_SZ.W)
val uopFMV_D_X = 69.U(UOPC_SZ.W)
val uopFMV_X_W = 70.U(UOPC_SZ.W)
val uopFMV_X_D = 71.U(UOPC_SZ.W)
val uopFSGNJ_S = 72.U(UOPC_SZ.W)
val uopFSGNJ_D = 73.U(UOPC_SZ.W)
val uopFCVT_S_D = 74.U(UOPC_SZ.W)
val uopFCVT_D_S = 75.U(UOPC_SZ.W)
val uopFCVT_S_X = 76.U(UOPC_SZ.W)
val uopFCVT_D_X = 77.U(UOPC_SZ.W)
val uopFCVT_X_S = 78.U(UOPC_SZ.W)
val uopFCVT_X_D = 79.U(UOPC_SZ.W)
val uopCMPR_S = 80.U(UOPC_SZ.W)
val uopCMPR_D = 81.U(UOPC_SZ.W)
val uopFCLASS_S = 82.U(UOPC_SZ.W)
val uopFCLASS_D = 83.U(UOPC_SZ.W)
val uopFMINMAX_S = 84.U(UOPC_SZ.W)
val uopFMINMAX_D = 85.U(UOPC_SZ.W)
// = 86.U(UOPC_SZ.W)
val uopFADD_S = 87.U(UOPC_SZ.W)
val uopFSUB_S = 88.U(UOPC_SZ.W)
val uopFMUL_S = 89.U(UOPC_SZ.W)
val uopFADD_D = 90.U(UOPC_SZ.W)
val uopFSUB_D = 91.U(UOPC_SZ.W)
val uopFMUL_D = 92.U(UOPC_SZ.W)
val uopFMADD_S = 93.U(UOPC_SZ.W)
val uopFMSUB_S = 94.U(UOPC_SZ.W)
val uopFNMADD_S = 95.U(UOPC_SZ.W)
val uopFNMSUB_S = 96.U(UOPC_SZ.W)
val uopFMADD_D = 97.U(UOPC_SZ.W)
val uopFMSUB_D = 98.U(UOPC_SZ.W)
val uopFNMADD_D = 99.U(UOPC_SZ.W)
val uopFNMSUB_D = 100.U(UOPC_SZ.W)
val uopFDIV_S = 101.U(UOPC_SZ.W)
val uopFDIV_D = 102.U(UOPC_SZ.W)
val uopFSQRT_S = 103.U(UOPC_SZ.W)
val uopFSQRT_D = 104.U(UOPC_SZ.W)
val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline
val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET
val uopSFENCE = 107.U(UOPC_SZ.W)
val uopROCC = 108.U(UOPC_SZ.W)
val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2"
// The Bubble Instruction (Machine generated NOP)
// Insert (XOR x0,x0,x0) which is different from software compiler
// generated NOPs which are (ADDI x0, x0, 0).
// Reasoning for this is to let visualizers and stat-trackers differentiate
// between software NOPs and machine-generated Bubbles in the pipeline.
val BUBBLE = (0x4033).U(32.W)
def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = {
val uop = Wire(new boom.v3.common.MicroOp)
uop := DontCare // Overridden in the following lines
uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior
uop.bypassable := false.B
uop.fp_val := false.B
uop.uses_stq := false.B
uop.uses_ldq := false.B
uop.pdst := 0.U
uop.dst_rtype := RT_X
val cs = Wire(new boom.v3.common.CtrlSignals())
cs := DontCare // Overridden in the following lines
cs.br_type := BR_N
cs.csr_cmd := freechips.rocketchip.rocket.CSR.N
cs.is_load := false.B
cs.is_sta := false.B
cs.is_std := false.B
uop.ctrl := cs
uop
}
}
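/**
 * Illustrative sketch (assumption, not part of the original file): a module that
 * mixes in ScalarOpConstants can reset a MicroOp register to the bubble, much as
 * BOOM's issue slot does with its slot_uop register. Names here are hypothetical.
 */
class NullMicroOpRegExample(implicit p: Parameters) extends Module with ScalarOpConstants
{
  val io = IO(new Bundle { val uop = Output(new boom.v3.common.MicroOp) })
  // The reset value is the machine-generated NOP bubble described above.
  val r_uop = RegInit(NullMicroOp())
  io.uop := r_uop
}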
/**
* Mixin for RISCV constants
*/
trait RISCVConstants
{
// abstract out instruction decode magic numbers
val RD_MSB = 11
val RD_LSB = 7
val RS1_MSB = 19
val RS1_LSB = 15
val RS2_MSB = 24
val RS2_LSB = 20
val RS3_MSB = 31
val RS3_LSB = 27
val CSR_ADDR_MSB = 31
val CSR_ADDR_LSB = 20
val CSR_ADDR_SZ = 12
  // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW, etc.)
val SHAMT_5_BIT = 25
val LONGEST_IMM_SZ = 20
val X0 = 0.U
val RA = 1.U // return address register
// memory consistency model
// The C/C++ atomics MCM requires that two loads to the same address maintain program order.
// The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior).
val MCM_ORDER_DEPENDENT_LOADS = true
val jal_opc = (0x6f).U
val jalr_opc = (0x67).U
def GetUop(inst: UInt): UInt = inst(6,0)
def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB)
def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB)
def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = {
val rvc_exp = Module(new RVCExpander)
rvc_exp.io.in := inst
Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst)
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = {
val bdecode = Module(new boom.v3.exu.BranchDecode)
bdecode.io.inst := inst
bdecode.io.pc := 0.U
bdecode.io.out.cfi_type
}
}
/**
* Mixin for exception cause constants
*/
trait ExcCauseConstants
{
  // a memory disambiguation misspeculation occurred
val MINI_EXCEPTION_MEM_ORDERING = 16.U
val MINI_EXCEPTION_CSR_REPLAY = 17.U
require (!freechips.rocketchip.rocket.Causes.all.contains(16))
require (!freechips.rocketchip.rocket.Causes.all.contains(17))
}
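// Illustrative sketch (assumption, not part of the original file): because the
// require calls above keep codes 16 and 17 out of the architectural cause space,
// downstream logic can match on them unambiguously, e.g. (signal name hypothetical):
//
//   when (rob_xcpt_cause === MINI_EXCEPTION_MEM_ORDERING) {
//     // flush and re-fetch from the offending load
//   }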
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
import FUConstants._
/**
* IO bundle to interact with Issue slot
*
* @param numWakeupPorts number of wakeup ports for the slot
*/
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val request_hp = Output(Bool())
val grant = Input(Bool())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted.
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz))))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W))))
val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
  val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collapsing queue.
val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued.
val debug = {
val result = new Bundle {
val p1 = Bool()
val p2 = Bool()
val p3 = Bool()
val ppred = Bool()
val state = UInt(width=2.W)
}
Output(result)
}
}
/**
* Single issue slot. Holds a uop within the issue queue
*
* @param numWakeupPorts number of wakeup ports
*/
class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters)
extends BoomModule
with IssueUnitConstants
{
val io = IO(new IssueSlotIO(numWakeupPorts))
  // The slot is in one of three states (from IssueUnitConstants):
  //   s_invalid : slot holds no uop
  //   s_valid_1 : slot is valid, holding 1 uop
  //   s_valid_2 : slot is valid, holding 2 uops (e.g., a store split into STA/STD)
def is_invalid = state === s_invalid
def is_valid = state =/= s_invalid
val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot)
val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot)
val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val state = RegInit(s_invalid)
val p1 = RegInit(false.B)
val p2 = RegInit(false.B)
val p3 = RegInit(false.B)
val ppred = RegInit(false.B)
// Poison if woken up by speculative load.
// Poison lasts 1 cycle (as ldMiss will come on the next cycle).
  // So if poisoned is true, set it back to false (it is re-asserted below when needed).
val p1_poisoned = RegInit(false.B)
val p2_poisoned = RegInit(false.B)
p1_poisoned := false.B
p2_poisoned := false.B
val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned)
val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned)
val slot_uop = RegInit(NullMicroOp)
val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
//-----------------------------------------------------------------------------
// next slot state computation
  // compute the next state for THIS entry slot (in a collapsing queue, the
  // current uop may get moved elsewhere, and a new uop can enter)
when (io.kill) {
state := s_invalid
} .elsewhen (io.in_uop.valid) {
state := io.in_uop.bits.iw_state
} .elsewhen (io.clear) {
state := s_invalid
} .otherwise {
state := next_state
}
//-----------------------------------------------------------------------------
// "update" state
// compute the next state for the micro-op in this slot. This micro-op may
// be moved elsewhere, so the "next_state" travels with it.
// defaults
next_state := state
next_uopc := slot_uop.uopc
next_lrs1_rtype := slot_uop.lrs1_rtype
next_lrs2_rtype := slot_uop.lrs2_rtype
when (io.kill) {
next_state := s_invalid
} .elsewhen ((io.grant && (state === s_valid_1)) ||
(io.grant && (state === s_valid_2) && p1 && p2 && ppred)) {
// try to issue this uop.
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_invalid
}
} .elsewhen (io.grant && (state === s_valid_2)) {
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_valid_1
when (p1) {
slot_uop.uopc := uopSTD
next_uopc := uopSTD
slot_uop.lrs1_rtype := RT_X
next_lrs1_rtype := RT_X
} .otherwise {
slot_uop.lrs2_rtype := RT_X
next_lrs2_rtype := RT_X
}
}
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.")
}
// Wakeup Compare Logic
// these signals are the "next_p*" for the current slot's micro-op.
  // they are important for shifting the current slot_uop up to another entry.
val next_p1 = WireInit(p1)
val next_p2 = WireInit(p2)
val next_p3 = WireInit(p3)
val next_ppred = WireInit(ppred)
when (io.in_uop.valid) {
p1 := !(io.in_uop.bits.prs1_busy)
p2 := !(io.in_uop.bits.prs2_busy)
p3 := !(io.in_uop.bits.prs3_busy)
ppred := !(io.in_uop.bits.ppred_busy)
}
when (io.ldspec_miss && next_p1_poisoned) {
assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!")
p1 := false.B
}
when (io.ldspec_miss && next_p2_poisoned) {
assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!")
p2 := false.B
}
for (i <- 0 until numWakeupPorts) {
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs1)) {
p1 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs2)) {
p2 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs3)) {
p3 := true.B
}
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) {
ppred := true.B
}
for (w <- 0 until memWidth) {
assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U),
"Loads to x0 should never speculatively wakeup other instructions")
}
// TODO disable if FP IQ.
for (w <- 0 until memWidth) {
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs1 &&
next_uop.lrs1_rtype === RT_FIX) {
p1 := true.B
p1_poisoned := true.B
assert (!next_p1_poisoned)
}
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs2 &&
next_uop.lrs2_rtype === RT_FIX) {
p2 := true.B
p2_poisoned := true.B
assert (!next_p2_poisoned)
}
}
// Handle branch misspeculations
val next_br_mask = GetNewBrMask(io.brupdate, slot_uop)
// was this micro-op killed by a branch? if yes, we can't let it be valid if
  // we compact it into another entry
when (IsKilledByBranch(io.brupdate, slot_uop)) {
next_state := s_invalid
}
when (!io.in_uop.valid) {
slot_uop.br_mask := next_br_mask
}
//-------------------------------------------------------------
// Request Logic
io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill
val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr
io.request_hp := io.request && high_priority
when (state === s_valid_1) {
io.request := p1 && p2 && p3 && ppred && !io.kill
} .elsewhen (state === s_valid_2) {
io.request := (p1 || p2) && ppred && !io.kill
} .otherwise {
io.request := false.B
}
//assign outputs
io.valid := is_valid
io.uop := slot_uop
io.uop.iw_p1_poisoned := p1_poisoned
io.uop.iw_p2_poisoned := p2_poisoned
// micro-op will vacate due to grant.
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred)
val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned)
io.will_be_valid := is_valid && !(may_vacate && !squash_grant)
io.out_uop := slot_uop
io.out_uop.iw_state := next_state
io.out_uop.uopc := next_uopc
io.out_uop.lrs1_rtype := next_lrs1_rtype
io.out_uop.lrs2_rtype := next_lrs2_rtype
io.out_uop.br_mask := next_br_mask
io.out_uop.prs1_busy := !p1
io.out_uop.prs2_busy := !p2
io.out_uop.prs3_busy := !p3
io.out_uop.ppred_busy := !ppred
io.out_uop.iw_p1_poisoned := p1_poisoned
io.out_uop.iw_p2_poisoned := p2_poisoned
when (state === s_valid_2) {
when (p1 && p2 && ppred) {
; // send out the entire instruction as one uop
} .elsewhen (p1 && ppred) {
io.uop.uopc := slot_uop.uopc
io.uop.lrs2_rtype := RT_X
} .elsewhen (p2 && ppred) {
io.uop.uopc := uopSTD
io.uop.lrs1_rtype := RT_X
}
}
// debug outputs
io.debug.p1 := p1
io.debug.p2 := p2
io.debug.p3 := p3
io.debug.ppred := ppred
io.debug.state := state
}
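/**
 * Illustrative sketch (assumption, not part of the original file): an issue unit
 * typically instantiates a row of these slots and broadcasts the shared control
 * signals. Parameter and signal names below are hypothetical.
 */
class IssueSlotRowExample(numSlots: Int, numWakeupPorts: Int)(implicit p: Parameters)
  extends BoomModule
{
  val io = IO(new Bundle {
    val brupdate = Input(new BrUpdateInfo())
    val flush    = Input(Bool())
  })
  val slots = Seq.fill(numSlots)(Module(new IssueSlot(numWakeupPorts)))
  for (s <- slots) {
    s.io <> DontCare             // tie off the ports this sketch does not exercise
    s.io.brupdate := io.brupdate
    s.io.kill     := io.flush
    s.io.grant    := false.B
    s.io.clear    := false.B
    s.io.ldspec_miss := false.B
  }
}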
| module IssueSlot_13( // @[issue-slot.scala:69:7]
input clock, // @[issue-slot.scala:69:7]
input reset, // @[issue-slot.scala:69:7]
output io_valid, // @[issue-slot.scala:73:14]
output io_will_be_valid, // @[issue-slot.scala:73:14]
output io_request, // @[issue-slot.scala:73:14]
output io_request_hp, // @[issue-slot.scala:73:14]
input io_grant, // @[issue-slot.scala:73:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14]
input io_brupdate_b2_valid, // @[issue-slot.scala:73:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14]
input io_kill, // @[issue-slot.scala:73:14]
input io_clear, // @[issue-slot.scala:73:14]
input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14]
input io_in_uop_valid, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14]
input [9:0] io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14]
input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:73:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14]
input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:73:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:73:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14]
input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:73:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14]
output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14]
output io_out_uop_is_br, // @[issue-slot.scala:73:14]
output io_out_uop_is_jalr, // @[issue-slot.scala:73:14]
output io_out_uop_is_jal, // @[issue-slot.scala:73:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:73:14]
output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14]
output io_out_uop_taken, // @[issue-slot.scala:73:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14]
output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:73:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14]
output io_out_uop_exception, // @[issue-slot.scala:73:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14]
output io_out_uop_bypassable, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:73:14]
output io_out_uop_is_fence, // @[issue-slot.scala:73:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:73:14]
output io_out_uop_is_amo, // @[issue-slot.scala:73:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:73:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
output io_out_uop_is_unique, // @[issue-slot.scala:73:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14]
output io_out_uop_ldst_val, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:73:14]
output io_out_uop_fp_val, // @[issue-slot.scala:73:14]
output io_out_uop_fp_single, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14]
output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14]
output [31:0] io_uop_inst, // @[issue-slot.scala:73:14]
output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14]
output io_uop_is_rvc, // @[issue-slot.scala:73:14]
output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14]
output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14]
output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14]
output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14]
output io_uop_is_br, // @[issue-slot.scala:73:14]
output io_uop_is_jalr, // @[issue-slot.scala:73:14]
output io_uop_is_jal, // @[issue-slot.scala:73:14]
output io_uop_is_sfb, // @[issue-slot.scala:73:14]
output [15:0] io_uop_br_mask, // @[issue-slot.scala:73:14]
output [3:0] io_uop_br_tag, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14]
output io_uop_edge_inst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14]
output io_uop_taken, // @[issue-slot.scala:73:14]
output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14]
output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14]
output [6:0] io_uop_rob_idx, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14]
output [4:0] io_uop_stq_idx, // @[issue-slot.scala:73:14]
output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14]
output [6:0] io_uop_pdst, // @[issue-slot.scala:73:14]
output [6:0] io_uop_prs1, // @[issue-slot.scala:73:14]
output [6:0] io_uop_prs2, // @[issue-slot.scala:73:14]
output [6:0] io_uop_prs3, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ppred, // @[issue-slot.scala:73:14]
output io_uop_prs1_busy, // @[issue-slot.scala:73:14]
output io_uop_prs2_busy, // @[issue-slot.scala:73:14]
output io_uop_prs3_busy, // @[issue-slot.scala:73:14]
output io_uop_ppred_busy, // @[issue-slot.scala:73:14]
output [6:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14]
output io_uop_exception, // @[issue-slot.scala:73:14]
output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14]
output io_uop_bypassable, // @[issue-slot.scala:73:14]
output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14]
output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14]
output io_uop_mem_signed, // @[issue-slot.scala:73:14]
output io_uop_is_fence, // @[issue-slot.scala:73:14]
output io_uop_is_fencei, // @[issue-slot.scala:73:14]
output io_uop_is_amo, // @[issue-slot.scala:73:14]
output io_uop_uses_ldq, // @[issue-slot.scala:73:14]
output io_uop_uses_stq, // @[issue-slot.scala:73:14]
output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
output io_uop_is_unique, // @[issue-slot.scala:73:14]
output io_uop_flush_on_commit, // @[issue-slot.scala:73:14]
output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_ldst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14]
output io_uop_ldst_val, // @[issue-slot.scala:73:14]
output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
output io_uop_frs3_en, // @[issue-slot.scala:73:14]
output io_uop_fp_val, // @[issue-slot.scala:73:14]
output io_uop_fp_single, // @[issue-slot.scala:73:14]
output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
output io_uop_bp_debug_if, // @[issue-slot.scala:73:14]
output io_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
output [1:0] io_uop_debug_fsrc, // @[issue-slot.scala:73:14]
output [1:0] io_uop_debug_tsrc, // @[issue-slot.scala:73:14]
output io_debug_p1, // @[issue-slot.scala:73:14]
output io_debug_p2, // @[issue-slot.scala:73:14]
output io_debug_p3, // @[issue-slot.scala:73:14]
output io_debug_ppred, // @[issue-slot.scala:73:14]
output [1:0] io_debug_state // @[issue-slot.scala:73:14]
);
wire io_grant_0 = io_grant; // @[issue-slot.scala:69:7]
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7]
wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7]
wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7]
wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7]
wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7]
wire io_ldspec_miss = 1'h0; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_0_bits_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_1_bits_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7]
wire io_spec_ld_wakeup_0_valid = 1'h0; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_out_uop_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_out_uop_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_uop_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire io_uop_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7]
wire next_p1_poisoned = 1'h0; // @[issue-slot.scala:99:29]
wire next_p2_poisoned = 1'h0; // @[issue-slot.scala:100:29]
wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18]
wire _squash_grant_T = 1'h0; // @[issue-slot.scala:261:53]
wire squash_grant = 1'h0; // @[issue-slot.scala:261:37]
wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:69:7]
wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_ftq_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_ldq_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_stq_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_ppred = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18]
wire [6:0] io_spec_ld_wakeup_0_bits = 7'h0; // @[issue-slot.scala:69:7]
wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_rob_idx = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_pdst = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_prs1 = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_prs2 = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_prs3 = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_stale_pdst = 7'h0; // @[consts.scala:269:19]
wire _io_will_be_valid_T_1 = 1'h1; // @[issue-slot.scala:262:51]
wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18]
wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_uop_br_tag = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_cs_br_type = 4'h0; // @[consts.scala:279:18]
wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19]
wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19]
wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19]
wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19]
wire [15:0] slot_uop_uop_br_mask = 16'h0; // @[consts.scala:269:19]
wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19]
wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19]
wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19]
wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19]
wire _io_valid_T; // @[issue-slot.scala:79:24]
wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32]
wire _io_request_hp_T; // @[issue-slot.scala:243:31]
wire [6:0] next_uopc; // @[issue-slot.scala:82:29]
wire [1:0] next_state; // @[issue-slot.scala:81:29]
wire [15:0] next_br_mask; // @[util.scala:85:25]
wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28]
wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28]
wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28]
wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28]
wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29]
wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29]
wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7]
wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7]
wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:69:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7]
wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:69:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7]
wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7]
wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7]
wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7]
wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7]
wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7]
wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7]
wire io_uop_is_br_0; // @[issue-slot.scala:69:7]
wire io_uop_is_jalr_0; // @[issue-slot.scala:69:7]
wire io_uop_is_jal_0; // @[issue-slot.scala:69:7]
wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7]
wire [15:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7]
wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7]
wire io_uop_taken_0; // @[issue-slot.scala:69:7]
wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7]
wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_pdst_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_prs1_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_prs2_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_prs3_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ppred_0; // @[issue-slot.scala:69:7]
wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7]
wire io_uop_exception_0; // @[issue-slot.scala:69:7]
wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7]
wire io_uop_bypassable_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7]
wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7]
wire io_uop_is_fence_0; // @[issue-slot.scala:69:7]
wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7]
wire io_uop_is_amo_0; // @[issue-slot.scala:69:7]
wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7]
wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7]
wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7]
wire io_uop_is_unique_0; // @[issue-slot.scala:69:7]
wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7]
wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7]
wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7]
wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7]
wire io_uop_fp_val_0; // @[issue-slot.scala:69:7]
wire io_uop_fp_single_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7]
wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7]
wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7]
wire io_debug_p1_0; // @[issue-slot.scala:69:7]
wire io_debug_p2_0; // @[issue-slot.scala:69:7]
wire io_debug_p3_0; // @[issue-slot.scala:69:7]
wire io_debug_ppred_0; // @[issue-slot.scala:69:7]
wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7]
wire io_valid_0; // @[issue-slot.scala:69:7]
wire io_will_be_valid_0; // @[issue-slot.scala:69:7]
wire io_request_0; // @[issue-slot.scala:69:7]
wire io_request_hp_0; // @[issue-slot.scala:69:7]
assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29]
assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29]
assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29]
assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29]
reg [1:0] state; // @[issue-slot.scala:86:22]
assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22]
reg p1; // @[issue-slot.scala:87:22]
assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22]
wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25]
reg p2; // @[issue-slot.scala:88:22]
assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22]
wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25]
reg p3; // @[issue-slot.scala:89:22]
assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22]
wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25]
reg ppred; // @[issue-slot.scala:90:22]
assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22]
wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28]
reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_rvc; // @[issue-slot.scala:102:25]
assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25]
assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25]
reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25]
assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25]
assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25]
reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25]
reg slot_uop_is_br; // @[issue-slot.scala:102:25]
assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_jalr; // @[issue-slot.scala:102:25]
assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_jal; // @[issue-slot.scala:102:25]
assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_sfb; // @[issue-slot.scala:102:25]
assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25]
reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:102:25]
assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:102:25]
assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_edge_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25]
assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_taken; // @[issue-slot.scala:102:25]
assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25]
assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25]
assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:102:25]
assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:102:25]
assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:102:25]
assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:102:25]
assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:102:25]
assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25]
assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25]
assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_exception; // @[issue-slot.scala:102:25]
assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25]
assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bypassable; // @[issue-slot.scala:102:25]
assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_mem_signed; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_fence; // @[issue-slot.scala:102:25]
assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_fencei; // @[issue-slot.scala:102:25]
assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_amo; // @[issue-slot.scala:102:25]
assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25]
assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_uses_stq; // @[issue-slot.scala:102:25]
assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25]
assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_unique; // @[issue-slot.scala:102:25]
assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:102:25]
assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ldst_val; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25]
assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25]
reg slot_uop_frs3_en; // @[issue-slot.scala:102:25]
assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_fp_val; // @[issue-slot.scala:102:25]
assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_fp_single; // @[issue-slot.scala:102:25]
assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25]
assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25]
assign io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_iw_p1_poisoned = ~io_in_uop_valid_0 & slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_iw_p2_poisoned = ~io_in_uop_valid_0 & slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [15:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ppred = io_in_uop_valid_0 ? io_in_uop_bits_ppred_0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs3_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_uses_stq = io_in_uop_valid_0 ? io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
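  // Editor's annotation (inferred from BOOM's issue-slot.scala; the generator emits no comments here):
  // the wires below implement the slot's grant/issue bookkeeping. A split store parked in state 2'h2
  // (s_valid_2) that is granted while operand 1 is ready issues its address (STA) half and degrades to a
  // data-only uop (next_uopc = 7'h3, rs1 type forced to RT_X = 2'h2); if the data half issues first,
  // rs2 is retired to RT_X instead.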
wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25]
wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}]
wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25]
wire _GEN = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51]
wire _GEN_0 = _GEN | ~(_T_12 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:62, :142:17, :143:23]
assign next_uopc = _GEN_0 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51]
assign next_lrs1_rtype = _GEN_0 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51]
wire _GEN_1 = _GEN | ~_T_12 | p1; // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:62, :142:17]
  assign next_lrs2_rtype = _GEN_1 ? slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17]
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
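/** Editor's usage sketch, not part of the upstream file: a toy module that reads a
  * +verbose=N plusarg and kills simulation once a free-running cycle counter exceeds
  * +max_cycles=N. The module name and plusarg names are illustrative placeholders,
  * not anything rocket-chip defines.
  */
class PlusArgExampleModule extends Module {
  // Free-running cycle counter fed to the timeout checker below.
  val cycle = RegInit(0.U(32.W))
  cycle := cycle + 1.U
  // Value of +verbose=N captured at simulation start (default 0 if the plusarg is absent).
  val verbose = PlusArg("verbose", default = 0, docstring = "example verbosity knob")
  when (verbose =/= 0.U) {
    printf("cycle %d\n", cycle)
  }
  // Asserts once cycle exceeds +max_cycles=N; the default of 0 never fires.
  PlusArg.timeout("max_cycles", default = 0, docstring = "example simulation cycle limit")(cycle)
}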
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
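/** Editor's sketch, not part of the original file: one way a source/dest node pair is
  * bound together diplomatically inside a LazyModule. `cp` stands for any ChannelParams
  * whose destId matches the id handed to the source node; the class name is an
  * illustrative placeholder.
  */
class ExampleChannelPair(cp: ChannelParams)(implicit p: Parameters) extends LazyModule {
  val src = ChannelSourceNode(cp.destId)
  val dst = ChannelDestNode(cp)
  // Sink := source; ChannelImp.edge() then carries `cp` to both endpoints of the
  // elaborated Channel bundle.
  dst := src
  lazy val module = new LazyModuleImp(this) { }
}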
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
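// A minimal configuration sketch with illustrative values (assuming the
// default RotatingSingleVCAllocator is kept): combining SA/ST and RC/VA
// trades the pipeline registers named above for longer combinational paths.
object ExampleUserRouterParams {
  val fullyPipelined = UserRouterParams(payloadBits = 64)
  val lowLatency = UserRouterParams(
    payloadBits = 64,
    combineSAST = true,
    combineRCVA = true,
    coupleSAVA = true)
}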
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
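    // Usage sketch: the PlusArg above reads `+noc_util_sample_rate=<N>` from the
    // simulator command line (its default of 0 is treated as disabled by the
    // `sample_rate =/= 0.U` guard), so e.g. N = 1000 prints one `nocsample`
    // line per sampled channel roughly every 1000 cycles.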
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
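/** A minimal usage sketch with a hypothetical `ExampleLeaf` (no diplomatic nodes): the [[LazyModule]] wrapper is
  * constructed first, and this hardware is only generated once `.module` is evaluated during elaboration.
  */
class ExampleLeaf(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val out = IO(Output(UInt(8.W)))
    out := 42.U
  }
}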
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
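/** A minimal sketch with a hypothetical `ExampleClockDomain`: the raw imp drives [[childClock]]/[[childReset]] from
  * its own IO so that lazily-built children elaborate in that clock domain.
  */
class ExampleClockDomain(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(Bool()))
    childClock := clockIn
    childReset := resetIn
  }
}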
| module Router_9( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [4:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_va_stall_3, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_debug_out_sa_stall_3, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_2_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_2_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_2_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_2_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_2_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [21:0] auto_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [21:0] auto_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [21:0] auto_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [21:0] auto_dest_nodes_in_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _vc_allocator_io_req_3_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_10; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_11; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_12; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_13; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_14; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_15; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_16; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_17; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_18; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_19; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_20; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_21; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_10_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_11_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_14_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_15_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_18_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_19_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_20_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_21_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_3_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_10_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_11_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_14_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_15_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_18_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_19_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_20_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_21_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [5:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [4:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _egress_unit_2_to_24_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_2_to_24_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_2_to_24_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_1_to_23_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_1_to_23_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_1_to_23_io_out_valid; // @[Router.scala:125:13]
wire _output_unit_0_to_31_io_credit_available_10; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_11; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_14; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_15; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_18; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_19; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_20; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_credit_available_21; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_10_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_11_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_14_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_15_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_18_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_19_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_20_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_31_io_channel_status_21_occupied; // @[Router.scala:122:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_3_from_29_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_3_from_29_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_3_from_29_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_3_from_29_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_3_from_29_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_3_from_29_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_3_from_29_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_3_from_29_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_2_from_28_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_2_from_28_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_2_from_28_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_2_from_28_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_2_from_28_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_2_from_28_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_2_from_28_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_2_from_28_io_in_ready; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_vcalloc_req_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_10; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_11; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_12; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_13; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_14; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_15; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_16; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_17; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_18; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_19; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_20; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_vc_sel_0_21; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_1_from_27_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_27_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_1_from_27_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_27_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [5:0] _ingress_unit_1_from_27_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_27_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [4:0] _ingress_unit_1_from_27_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_1_from_27_io_in_ready; // @[Router.scala:116:13]
wire _input_unit_0_from_31_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_31_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_0_from_31_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_31_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [5:0] _input_unit_0_from_31_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_31_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [5:0] _input_unit_0_from_31_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_31_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [2:0] fires_count = {1'h0, {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_31_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _ingress_unit_1_from_27_io_vcalloc_req_valid}} + {1'h0, {1'h0, _vc_allocator_io_req_2_ready & _ingress_unit_2_from_28_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_3_ready & _ingress_unit_3_from_29_io_vcalloc_req_valid}}; // @[Decoupled.scala:51:35]
reg REG_2_0_3_0; // @[Router.scala:178:14]
reg REG_2_0_2_0; // @[Router.scala:178:14]
reg REG_2_0_1_0; // @[Router.scala:178:14]
reg REG_2_0_0_0; // @[Router.scala:178:14]
reg REG_1_0_3_0; // @[Router.scala:178:14]
reg REG_1_0_2_0; // @[Router.scala:178:14]
reg REG_1_0_1_0; // @[Router.scala:178:14]
reg REG_1_0_0_0; // @[Router.scala:178:14]
reg REG_0_0_3_0; // @[Router.scala:178:14]
reg REG_0_0_2_0; // @[Router.scala:178:14]
reg REG_0_0_1_0; // @[Router.scala:178:14]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_2; // @[Router.scala:203:29]
reg fired_2; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_3; // @[Router.scala:203:29]
reg fired_3; // @[Router.scala:204:26]
wire _GEN_4 = _GEN_0 & fired_3; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_4; // @[Router.scala:203:29]
reg fired_4; // @[Router.scala:204:26]
wire _GEN_5 = _GEN_0 & fired_4; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_5; // @[Router.scala:203:29]
reg fired_5; // @[Router.scala:204:26]
wire _GEN_6 = _GEN_0 & fired_5; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
            // extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
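// A behavioral sketch, as implied by the shift-based branch above: for
// topBound > bottomBound the result is a (topBound - bottomBound)-wide
// thermometer mask whose bit j is set exactly when in > bottomBound + j.
class LowMaskExample extends RawModule
{
    val in = IO(Input(UInt(4.W)))
    val mask = IO(Output(UInt(6.W)))
    // e.g. in = 5 gives mask = "b000111" (bits 0..2 set, since 5 > 2, 3, and 4)
    mask := lowMask(in, 8, 2)
}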
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
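// Sketch of the intent for a nonzero input: the count of leading zero bits,
// e.g. countLeadingZeros("b00101000".U(8.W)) elaborates to 2.U (the first set
// bit, scanning from the MSB, is two positions down).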
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
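// Worked sketch: each output bit is the OR of one aligned 4-bit group of the
// input (the top group may be narrower), e.g.
// orReduceBy4("b000001010000".U(12.W)) elaborates to "b010".U. This is used
// below in MulAddRecFN when gathering the C-alignment sticky bit.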
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
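// This bundle carries the operand classification flags, the tentative sign
// and exponent, the effective-subtraction flag, and the top/bottom pieces of
// the aligned addend from the pre-multiply stage to the post-multiply stage,
// so the sigWidth-by-sigWidth multiply-add itself can be performed
// externally between the two halves.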
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
        (Mux(doSubMags, ~rawC.sig, rawC.sig) ##
             Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
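// The pre-multiply stage classifies A, B, and C, forms the product's sign
// and a biased exponent sum, and aligns C against the yet-to-be-computed
// product: mulAddC holds the addend bits that overlap the product,
// highAlignedSigC the bits above it, and bit0AlignedSigC the sticky bit
// below it (gathered from the shifted-out bits via orReduceBy4 and lowMask).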
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
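// The post-multiply stage folds the carry out of the external multiply-add
// into highAlignedSigC to form sigSum, then takes one of two paths: when C
// dominates, the sum is left-shifted by CDom_CAlignDist; otherwise a
// leading-zero count on a 2-bit-reduced copy of the sum (orReduceBy2 +
// countLeadingZeros) gives an even normalization distance. Both paths
// collapse the discarded low bits into a sticky bit, and NaN / infinity /
// zero and invalid-operation cases are resolved last.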
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
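// Illustrative usage sketch (ioA/ioB/ioC are stand-in signals; the
// conversions assume HardFloat's recFNFromFN / fNFromRecFN helpers): a
// single-precision fused multiply-add computing a * b + c with
// round-to-nearest-even.
//
//   val fma = Module(new MulAddRecFN(8, 24))
//   fma.io.op             := 0.U                      // a * b + c
//   fma.io.a              := recFNFromFN(8, 24, ioA)
//   fma.io.b              := recFNFromFN(8, 24, ioB)
//   fma.io.c              := recFNFromFN(8, 24, ioC)
//   fma.io.roundingMode   := round_near_even
//   fma.io.detectTininess := tininess_afterRounding
//   val result = fNFromRecFN(8, 24, fma.io.out)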
| module MulAddRecFNToRaw_postMul_e8_s24_61( // @[MulAddRecFN.scala:169:7]
input io_fromPreMul_isSigNaNAny, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isNaNAOrB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_signProd, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isNaNC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroC, // @[MulAddRecFN.scala:172:16]
input [9:0] io_fromPreMul_sExpSum, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_doSubMags, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_CIsDominant, // @[MulAddRecFN.scala:172:16]
input [4:0] io_fromPreMul_CDom_CAlignDist, // @[MulAddRecFN.scala:172:16]
input [25:0] io_fromPreMul_highAlignedSigC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_bit0AlignedSigC, // @[MulAddRecFN.scala:172:16]
input [48:0] io_mulAddResult, // @[MulAddRecFN.scala:172:16]
output io_invalidExc, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isNaN, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isInf, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isZero, // @[MulAddRecFN.scala:172:16]
output io_rawOut_sign, // @[MulAddRecFN.scala:172:16]
output [9:0] io_rawOut_sExp, // @[MulAddRecFN.scala:172:16]
output [26:0] io_rawOut_sig // @[MulAddRecFN.scala:172:16]
);
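  // In this specialized instance the rounding mode and the B-operand
  // special-case flags have been constant-folded: io_roundingMode is tied to
  // 3'h0 (HardFloat's round_near_even encoding) and io_fromPreMul_isInfB /
  // io_fromPreMul_isZeroB to 1'h0 below, so roundingMode_min is always zero.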
wire io_fromPreMul_isSigNaNAny_0 = io_fromPreMul_isSigNaNAny; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNAOrB_0 = io_fromPreMul_isNaNAOrB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfA_0 = io_fromPreMul_isInfA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroA_0 = io_fromPreMul_isZeroA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_signProd_0 = io_fromPreMul_signProd; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNC_0 = io_fromPreMul_isNaNC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfC_0 = io_fromPreMul_isInfC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroC_0 = io_fromPreMul_isZeroC; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_fromPreMul_sExpSum_0 = io_fromPreMul_sExpSum; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_doSubMags_0 = io_fromPreMul_doSubMags; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_CIsDominant_0 = io_fromPreMul_CIsDominant; // @[MulAddRecFN.scala:169:7]
wire [4:0] io_fromPreMul_CDom_CAlignDist_0 = io_fromPreMul_CDom_CAlignDist; // @[MulAddRecFN.scala:169:7]
wire [25:0] io_fromPreMul_highAlignedSigC_0 = io_fromPreMul_highAlignedSigC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_bit0AlignedSigC_0 = io_fromPreMul_bit0AlignedSigC; // @[MulAddRecFN.scala:169:7]
wire [48:0] io_mulAddResult_0 = io_mulAddResult; // @[MulAddRecFN.scala:169:7]
wire _io_rawOut_sign_T_3 = 1'h1; // @[MulAddRecFN.scala:287:29]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:169:7, :172:16]
wire io_fromPreMul_isInfB = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroB = 1'h0; // @[MulAddRecFN.scala:169:7]
wire roundingMode_min = 1'h0; // @[MulAddRecFN.scala:186:45]
wire _io_invalidExc_T = 1'h0; // @[MulAddRecFN.scala:272:31]
wire _io_invalidExc_T_2 = 1'h0; // @[MulAddRecFN.scala:273:32]
wire _io_rawOut_sign_T_8 = 1'h0; // @[MulAddRecFN.scala:289:26]
wire _io_rawOut_sign_T_10 = 1'h0; // @[MulAddRecFN.scala:289:46]
wire _io_invalidExc_T_1 = io_fromPreMul_isSigNaNAny_0; // @[MulAddRecFN.scala:169:7, :271:35]
wire notNaN_isInfProd = io_fromPreMul_isInfA_0; // @[MulAddRecFN.scala:169:7, :264:49]
wire _io_invalidExc_T_5 = io_fromPreMul_isInfA_0; // @[MulAddRecFN.scala:169:7, :275:36]
wire _notNaN_addZeros_T = io_fromPreMul_isZeroA_0; // @[MulAddRecFN.scala:169:7, :267:32]
wire _io_invalidExc_T_9; // @[MulAddRecFN.scala:273:57]
wire _io_rawOut_isNaN_T; // @[MulAddRecFN.scala:278:48]
wire notNaN_isInfOut; // @[MulAddRecFN.scala:265:44]
wire _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:282:25]
wire _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:290:50]
wire [9:0] _io_rawOut_sExp_T; // @[MulAddRecFN.scala:293:26]
wire [26:0] _io_rawOut_sig_T; // @[MulAddRecFN.scala:294:25]
wire io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
wire [26:0] io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
wire io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
wire opSignC = io_fromPreMul_signProd_0 ^ io_fromPreMul_doSubMags_0; // @[MulAddRecFN.scala:169:7, :190:42]
wire _sigSum_T = io_mulAddResult_0[48]; // @[MulAddRecFN.scala:169:7, :192:32]
wire [26:0] _sigSum_T_1 = {1'h0, io_fromPreMul_highAlignedSigC_0} + 27'h1; // @[MulAddRecFN.scala:169:7, :193:47]
wire [25:0] _sigSum_T_2 = _sigSum_T_1[25:0]; // @[MulAddRecFN.scala:193:47]
wire [25:0] _sigSum_T_3 = _sigSum_T ? _sigSum_T_2 : io_fromPreMul_highAlignedSigC_0; // @[MulAddRecFN.scala:169:7, :192:{16,32}, :193:47]
wire [47:0] _sigSum_T_4 = io_mulAddResult_0[47:0]; // @[MulAddRecFN.scala:169:7, :196:28]
wire [73:0] sigSum_hi = {_sigSum_T_3, _sigSum_T_4}; // @[MulAddRecFN.scala:192:{12,16}, :196:28]
wire [74:0] sigSum = {sigSum_hi, io_fromPreMul_bit0AlignedSigC_0}; // @[MulAddRecFN.scala:169:7, :192:12]
wire [1:0] _CDom_sExp_T = {1'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :203:69]
wire [10:0] _GEN = {io_fromPreMul_sExpSum_0[9], io_fromPreMul_sExpSum_0}; // @[MulAddRecFN.scala:169:7, :203:43]
wire [10:0] _CDom_sExp_T_1 = _GEN - {{9{_CDom_sExp_T[1]}}, _CDom_sExp_T}; // @[MulAddRecFN.scala:203:{43,69}]
wire [9:0] _CDom_sExp_T_2 = _CDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:203:43]
wire [9:0] CDom_sExp = _CDom_sExp_T_2; // @[MulAddRecFN.scala:203:43]
wire [49:0] _CDom_absSigSum_T = sigSum[74:25]; // @[MulAddRecFN.scala:192:12, :206:20]
wire [49:0] _CDom_absSigSum_T_1 = ~_CDom_absSigSum_T; // @[MulAddRecFN.scala:206:{13,20}]
wire [1:0] _CDom_absSigSum_T_2 = io_fromPreMul_highAlignedSigC_0[25:24]; // @[MulAddRecFN.scala:169:7, :209:46]
wire [2:0] _CDom_absSigSum_T_3 = {1'h0, _CDom_absSigSum_T_2}; // @[MulAddRecFN.scala:207:22, :209:46]
wire [46:0] _CDom_absSigSum_T_4 = sigSum[72:26]; // @[MulAddRecFN.scala:192:12, :210:23]
wire [49:0] _CDom_absSigSum_T_5 = {_CDom_absSigSum_T_3, _CDom_absSigSum_T_4}; // @[MulAddRecFN.scala:207:22, :209:71, :210:23]
wire [49:0] CDom_absSigSum = io_fromPreMul_doSubMags_0 ? _CDom_absSigSum_T_1 : _CDom_absSigSum_T_5; // @[MulAddRecFN.scala:169:7, :205:12, :206:13, :209:71]
wire [23:0] _CDom_absSigSumExtra_T = sigSum[24:1]; // @[MulAddRecFN.scala:192:12, :215:21]
wire [23:0] _CDom_absSigSumExtra_T_1 = ~_CDom_absSigSumExtra_T; // @[MulAddRecFN.scala:215:{14,21}]
wire _CDom_absSigSumExtra_T_2 = |_CDom_absSigSumExtra_T_1; // @[MulAddRecFN.scala:215:{14,36}]
wire [24:0] _CDom_absSigSumExtra_T_3 = sigSum[25:1]; // @[MulAddRecFN.scala:192:12, :216:19]
wire _CDom_absSigSumExtra_T_4 = |_CDom_absSigSumExtra_T_3; // @[MulAddRecFN.scala:216:{19,37}]
wire CDom_absSigSumExtra = io_fromPreMul_doSubMags_0 ? _CDom_absSigSumExtra_T_2 : _CDom_absSigSumExtra_T_4; // @[MulAddRecFN.scala:169:7, :214:12, :215:36, :216:37]
wire [80:0] _CDom_mainSig_T = {31'h0, CDom_absSigSum} << io_fromPreMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:169:7, :205:12, :219:24]
wire [28:0] CDom_mainSig = _CDom_mainSig_T[49:21]; // @[MulAddRecFN.scala:219:{24,56}]
wire [23:0] _CDom_reduced4SigExtra_T = CDom_absSigSum[23:0]; // @[MulAddRecFN.scala:205:12, :222:36]
wire [26:0] _CDom_reduced4SigExtra_T_1 = {_CDom_reduced4SigExtra_T, 3'h0}; // @[MulAddRecFN.scala:169:7, :172:16, :222:{36,53}]
wire _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:123:57]
wire CDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:118:30]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_0_T = _CDom_reduced4SigExtra_T_1[3:0]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_0_T_1 = |_CDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_0 = _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_1_T = _CDom_reduced4SigExtra_T_1[7:4]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_1_T_1 = |_CDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_1 = _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_2_T = _CDom_reduced4SigExtra_T_1[11:8]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_2_T_1 = |_CDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_2 = _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_3_T = _CDom_reduced4SigExtra_T_1[15:12]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_3_T_1 = |_CDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_3 = _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_4_T = _CDom_reduced4SigExtra_T_1[19:16]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_4_T_1 = |_CDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_4 = _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_5_T = _CDom_reduced4SigExtra_T_1[23:20]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_5_T_1 = |_CDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_5 = _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54]
wire [2:0] _CDom_reduced4SigExtra_reducedVec_6_T = _CDom_reduced4SigExtra_T_1[26:24]; // @[primitives.scala:123:15]
assign _CDom_reduced4SigExtra_reducedVec_6_T_1 = |_CDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}]
assign CDom_reduced4SigExtra_reducedVec_6 = _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57]
wire [1:0] CDom_reduced4SigExtra_lo_hi = {CDom_reduced4SigExtra_reducedVec_2, CDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20]
wire [2:0] CDom_reduced4SigExtra_lo = {CDom_reduced4SigExtra_lo_hi, CDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_lo = {CDom_reduced4SigExtra_reducedVec_4, CDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_hi = {CDom_reduced4SigExtra_reducedVec_6, CDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20]
wire [3:0] CDom_reduced4SigExtra_hi = {CDom_reduced4SigExtra_hi_hi, CDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:124:20]
wire [6:0] _CDom_reduced4SigExtra_T_2 = {CDom_reduced4SigExtra_hi, CDom_reduced4SigExtra_lo}; // @[primitives.scala:124:20]
wire [2:0] _CDom_reduced4SigExtra_T_3 = io_fromPreMul_CDom_CAlignDist_0[4:2]; // @[MulAddRecFN.scala:169:7, :223:51]
wire [2:0] _CDom_reduced4SigExtra_T_4 = ~_CDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [8:0] CDom_reduced4SigExtra_shift = $signed(9'sh100 >>> _CDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _CDom_reduced4SigExtra_T_5 = CDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _CDom_reduced4SigExtra_T_6 = _CDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _CDom_reduced4SigExtra_T_7 = _CDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_8 = _CDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_9 = _CDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_10 = {_CDom_reduced4SigExtra_T_8, _CDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_11 = _CDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_12 = _CDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_13 = _CDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_14 = {_CDom_reduced4SigExtra_T_12, _CDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _CDom_reduced4SigExtra_T_15 = {_CDom_reduced4SigExtra_T_10, _CDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_16 = _CDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _CDom_reduced4SigExtra_T_17 = _CDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_18 = _CDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_19 = {_CDom_reduced4SigExtra_T_17, _CDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _CDom_reduced4SigExtra_T_20 = {_CDom_reduced4SigExtra_T_15, _CDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _CDom_reduced4SigExtra_T_21 = {1'h0, _CDom_reduced4SigExtra_T_2[5:0] & _CDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :124:20]
wire CDom_reduced4SigExtra = |_CDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:222:72, :223:73]
wire [25:0] _CDom_sig_T = CDom_mainSig[28:3]; // @[MulAddRecFN.scala:219:56, :225:25]
wire [2:0] _CDom_sig_T_1 = CDom_mainSig[2:0]; // @[MulAddRecFN.scala:219:56, :226:25]
wire _CDom_sig_T_2 = |_CDom_sig_T_1; // @[MulAddRecFN.scala:226:{25,32}]
wire _CDom_sig_T_3 = _CDom_sig_T_2 | CDom_reduced4SigExtra; // @[MulAddRecFN.scala:223:73, :226:{32,36}]
wire _CDom_sig_T_4 = _CDom_sig_T_3 | CDom_absSigSumExtra; // @[MulAddRecFN.scala:214:12, :226:{36,61}]
wire [26:0] CDom_sig = {_CDom_sig_T, _CDom_sig_T_4}; // @[MulAddRecFN.scala:225:{12,25}, :226:61]
wire notCDom_signSigSum = sigSum[51]; // @[MulAddRecFN.scala:192:12, :232:36]
wire [50:0] _notCDom_absSigSum_T = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20]
wire [50:0] _notCDom_absSigSum_T_2 = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20, :236:19]
wire [50:0] _notCDom_absSigSum_T_1 = ~_notCDom_absSigSum_T; // @[MulAddRecFN.scala:235:{13,20}]
wire [51:0] _notCDom_absSigSum_T_3 = {1'h0, _notCDom_absSigSum_T_2} + {51'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :236:{19,41}]
wire [50:0] _notCDom_absSigSum_T_4 = _notCDom_absSigSum_T_3[50:0]; // @[MulAddRecFN.scala:236:41]
wire [50:0] notCDom_absSigSum = notCDom_signSigSum ? _notCDom_absSigSum_T_1 : _notCDom_absSigSum_T_4; // @[MulAddRecFN.scala:232:36, :234:12, :235:13, :236:41]
wire _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced2AbsSigSum_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_6; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_7; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_8; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_9; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_10; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_11; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_12; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_13; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_14; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_15; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_16; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_17; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_18; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_19; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_20; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_21; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_22; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_23; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_24; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_25; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_0_T = notCDom_absSigSum[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_0_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_0 = _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_1_T = notCDom_absSigSum[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_1_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_1 = _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_2_T = notCDom_absSigSum[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_2_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_2 = _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_3_T = notCDom_absSigSum[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_3_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_3 = _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_4_T = notCDom_absSigSum[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_4_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_4 = _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_5_T = notCDom_absSigSum[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_5_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_5 = _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_6_T = notCDom_absSigSum[13:12]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_6_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_6_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_6 = _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_7_T = notCDom_absSigSum[15:14]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_7_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_7_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_7 = _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_8_T = notCDom_absSigSum[17:16]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_8_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_8_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_8 = _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_9_T = notCDom_absSigSum[19:18]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_9_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_9_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_9 = _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_10_T = notCDom_absSigSum[21:20]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_10_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_10_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_10 = _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_11_T = notCDom_absSigSum[23:22]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_11_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_11_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_11 = _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_12_T = notCDom_absSigSum[25:24]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_12_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_12_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_12 = _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_13_T = notCDom_absSigSum[27:26]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_13_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_13_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_13 = _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_14_T = notCDom_absSigSum[29:28]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_14_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_14_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_14 = _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_15_T = notCDom_absSigSum[31:30]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_15_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_15_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_15 = _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_16_T = notCDom_absSigSum[33:32]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_16_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_16_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_16 = _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_17_T = notCDom_absSigSum[35:34]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_17_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_17_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_17 = _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_18_T = notCDom_absSigSum[37:36]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_18_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_18_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_18 = _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_19_T = notCDom_absSigSum[39:38]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_19_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_19_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_19 = _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_20_T = notCDom_absSigSum[41:40]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_20_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_20_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_20 = _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_21_T = notCDom_absSigSum[43:42]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_21_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_21_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_21 = _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_22_T = notCDom_absSigSum[45:44]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_22_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_22_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_22 = _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_23_T = notCDom_absSigSum[47:46]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_23_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_23_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_23 = _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_24_T = notCDom_absSigSum[49:48]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_24_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_24_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_24 = _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T = notCDom_absSigSum[50]; // @[primitives.scala:106:15]
assign _notCDom_reduced2AbsSigSum_reducedVec_25_T_1 = _notCDom_reduced2AbsSigSum_reducedVec_25_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced2AbsSigSum_reducedVec_25 = _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_2, notCDom_reduced2AbsSigSum_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_5, notCDom_reduced2AbsSigSum_reducedVec_4}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_hi = {notCDom_reduced2AbsSigSum_lo_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_hi, notCDom_reduced2AbsSigSum_lo_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_8, notCDom_reduced2AbsSigSum_reducedVec_7}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_hi_lo = {notCDom_reduced2AbsSigSum_lo_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_6}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_10, notCDom_reduced2AbsSigSum_reducedVec_9}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_12, notCDom_reduced2AbsSigSum_reducedVec_11}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_lo_hi_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_lo_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_lo = {notCDom_reduced2AbsSigSum_lo_hi, notCDom_reduced2AbsSigSum_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_15, notCDom_reduced2AbsSigSum_reducedVec_14}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_lo = {notCDom_reduced2AbsSigSum_hi_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_13}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_18, notCDom_reduced2AbsSigSum_reducedVec_17}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_hi = {notCDom_reduced2AbsSigSum_hi_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_16}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_hi_lo = {notCDom_reduced2AbsSigSum_hi_lo_hi, notCDom_reduced2AbsSigSum_hi_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_21, notCDom_reduced2AbsSigSum_reducedVec_20}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_hi_lo = {notCDom_reduced2AbsSigSum_hi_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_19}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_23, notCDom_reduced2AbsSigSum_reducedVec_22}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_25, notCDom_reduced2AbsSigSum_reducedVec_24}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_hi_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_hi = {notCDom_reduced2AbsSigSum_hi_hi, notCDom_reduced2AbsSigSum_hi_lo}; // @[primitives.scala:107:20]
wire [25:0] notCDom_reduced2AbsSigSum = {notCDom_reduced2AbsSigSum_hi, notCDom_reduced2AbsSigSum_lo}; // @[primitives.scala:107:20]
wire _notCDom_normDistReduced2_T = notCDom_reduced2AbsSigSum[0]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_1 = notCDom_reduced2AbsSigSum[1]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_2 = notCDom_reduced2AbsSigSum[2]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_3 = notCDom_reduced2AbsSigSum[3]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_4 = notCDom_reduced2AbsSigSum[4]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_5 = notCDom_reduced2AbsSigSum[5]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_6 = notCDom_reduced2AbsSigSum[6]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_7 = notCDom_reduced2AbsSigSum[7]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_8 = notCDom_reduced2AbsSigSum[8]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_9 = notCDom_reduced2AbsSigSum[9]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_10 = notCDom_reduced2AbsSigSum[10]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_11 = notCDom_reduced2AbsSigSum[11]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_12 = notCDom_reduced2AbsSigSum[12]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_13 = notCDom_reduced2AbsSigSum[13]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_14 = notCDom_reduced2AbsSigSum[14]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_15 = notCDom_reduced2AbsSigSum[15]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_16 = notCDom_reduced2AbsSigSum[16]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_17 = notCDom_reduced2AbsSigSum[17]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_18 = notCDom_reduced2AbsSigSum[18]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_19 = notCDom_reduced2AbsSigSum[19]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_20 = notCDom_reduced2AbsSigSum[20]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_21 = notCDom_reduced2AbsSigSum[21]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_22 = notCDom_reduced2AbsSigSum[22]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_23 = notCDom_reduced2AbsSigSum[23]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_24 = notCDom_reduced2AbsSigSum[24]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_25 = notCDom_reduced2AbsSigSum[25]; // @[primitives.scala:91:52, :107:20]
wire [4:0] _notCDom_normDistReduced2_T_26 = {4'hC, ~_notCDom_normDistReduced2_T_1}; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_27 = _notCDom_normDistReduced2_T_2 ? 5'h17 : _notCDom_normDistReduced2_T_26; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_28 = _notCDom_normDistReduced2_T_3 ? 5'h16 : _notCDom_normDistReduced2_T_27; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_29 = _notCDom_normDistReduced2_T_4 ? 5'h15 : _notCDom_normDistReduced2_T_28; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_30 = _notCDom_normDistReduced2_T_5 ? 5'h14 : _notCDom_normDistReduced2_T_29; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_31 = _notCDom_normDistReduced2_T_6 ? 5'h13 : _notCDom_normDistReduced2_T_30; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_32 = _notCDom_normDistReduced2_T_7 ? 5'h12 : _notCDom_normDistReduced2_T_31; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_33 = _notCDom_normDistReduced2_T_8 ? 5'h11 : _notCDom_normDistReduced2_T_32; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_34 = _notCDom_normDistReduced2_T_9 ? 5'h10 : _notCDom_normDistReduced2_T_33; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_35 = _notCDom_normDistReduced2_T_10 ? 5'hF : _notCDom_normDistReduced2_T_34; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_36 = _notCDom_normDistReduced2_T_11 ? 5'hE : _notCDom_normDistReduced2_T_35; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_37 = _notCDom_normDistReduced2_T_12 ? 5'hD : _notCDom_normDistReduced2_T_36; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_38 = _notCDom_normDistReduced2_T_13 ? 5'hC : _notCDom_normDistReduced2_T_37; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_39 = _notCDom_normDistReduced2_T_14 ? 5'hB : _notCDom_normDistReduced2_T_38; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_40 = _notCDom_normDistReduced2_T_15 ? 5'hA : _notCDom_normDistReduced2_T_39; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_41 = _notCDom_normDistReduced2_T_16 ? 5'h9 : _notCDom_normDistReduced2_T_40; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_42 = _notCDom_normDistReduced2_T_17 ? 5'h8 : _notCDom_normDistReduced2_T_41; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_43 = _notCDom_normDistReduced2_T_18 ? 5'h7 : _notCDom_normDistReduced2_T_42; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_44 = _notCDom_normDistReduced2_T_19 ? 5'h6 : _notCDom_normDistReduced2_T_43; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_45 = _notCDom_normDistReduced2_T_20 ? 5'h5 : _notCDom_normDistReduced2_T_44; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_46 = _notCDom_normDistReduced2_T_21 ? 5'h4 : _notCDom_normDistReduced2_T_45; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_47 = _notCDom_normDistReduced2_T_22 ? 5'h3 : _notCDom_normDistReduced2_T_46; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_48 = _notCDom_normDistReduced2_T_23 ? 5'h2 : _notCDom_normDistReduced2_T_47; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_49 = _notCDom_normDistReduced2_T_24 ? 5'h1 : _notCDom_normDistReduced2_T_48; // @[Mux.scala:50:70]
wire [4:0] notCDom_normDistReduced2 = _notCDom_normDistReduced2_T_25 ? 5'h0 : _notCDom_normDistReduced2_T_49; // @[Mux.scala:50:70]
wire [5:0] notCDom_nearNormDist = {notCDom_normDistReduced2, 1'h0}; // @[Mux.scala:50:70]
wire [6:0] _notCDom_sExp_T = {1'h0, notCDom_nearNormDist}; // @[MulAddRecFN.scala:240:56, :241:76]
wire [10:0] _notCDom_sExp_T_1 = _GEN - {{4{_notCDom_sExp_T[6]}}, _notCDom_sExp_T}; // @[MulAddRecFN.scala:203:43, :241:{46,76}]
wire [9:0] _notCDom_sExp_T_2 = _notCDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:241:46]
wire [9:0] notCDom_sExp = _notCDom_sExp_T_2; // @[MulAddRecFN.scala:241:46]
wire [113:0] _notCDom_mainSig_T = {63'h0, notCDom_absSigSum} << notCDom_nearNormDist; // @[MulAddRecFN.scala:234:12, :240:56, :243:27]
wire [28:0] notCDom_mainSig = _notCDom_mainSig_T[51:23]; // @[MulAddRecFN.scala:243:{27,50}]
wire [12:0] _notCDom_reduced4SigExtra_T = notCDom_reduced2AbsSigSum[12:0]; // @[primitives.scala:107:20]
wire [12:0] _notCDom_reduced4SigExtra_T_1 = _notCDom_reduced4SigExtra_T; // @[MulAddRecFN.scala:247:{39,55}]
wire _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_0_T = _notCDom_reduced4SigExtra_T_1[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_0_T_1 = |_notCDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_0 = _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_1_T = _notCDom_reduced4SigExtra_T_1[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_1_T_1 = |_notCDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_1 = _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_2_T = _notCDom_reduced4SigExtra_T_1[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_2_T_1 = |_notCDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_2 = _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_3_T = _notCDom_reduced4SigExtra_T_1[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_3_T_1 = |_notCDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_3 = _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_4_T = _notCDom_reduced4SigExtra_T_1[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_4_T_1 = |_notCDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_4 = _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_5_T = _notCDom_reduced4SigExtra_T_1[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_5_T_1 = |_notCDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_5 = _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T = _notCDom_reduced4SigExtra_T_1[12]; // @[primitives.scala:106:15]
assign _notCDom_reduced4SigExtra_reducedVec_6_T_1 = _notCDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced4SigExtra_reducedVec_6 = _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced4SigExtra_lo_hi = {notCDom_reduced4SigExtra_reducedVec_2, notCDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced4SigExtra_lo = {notCDom_reduced4SigExtra_lo_hi, notCDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_lo = {notCDom_reduced4SigExtra_reducedVec_4, notCDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_hi = {notCDom_reduced4SigExtra_reducedVec_6, notCDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced4SigExtra_hi = {notCDom_reduced4SigExtra_hi_hi, notCDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] _notCDom_reduced4SigExtra_T_2 = {notCDom_reduced4SigExtra_hi, notCDom_reduced4SigExtra_lo}; // @[primitives.scala:107:20]
wire [3:0] _notCDom_reduced4SigExtra_T_3 = notCDom_normDistReduced2[4:1]; // @[Mux.scala:50:70]
wire [3:0] _notCDom_reduced4SigExtra_T_4 = ~_notCDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [16:0] notCDom_reduced4SigExtra_shift = $signed(17'sh10000 >>> _notCDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _notCDom_reduced4SigExtra_T_5 = notCDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _notCDom_reduced4SigExtra_T_6 = _notCDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _notCDom_reduced4SigExtra_T_7 = _notCDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_8 = _notCDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_9 = _notCDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_10 = {_notCDom_reduced4SigExtra_T_8, _notCDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_11 = _notCDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_12 = _notCDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_13 = _notCDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_14 = {_notCDom_reduced4SigExtra_T_12, _notCDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _notCDom_reduced4SigExtra_T_15 = {_notCDom_reduced4SigExtra_T_10, _notCDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_16 = _notCDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _notCDom_reduced4SigExtra_T_17 = _notCDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_18 = _notCDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_19 = {_notCDom_reduced4SigExtra_T_17, _notCDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _notCDom_reduced4SigExtra_T_20 = {_notCDom_reduced4SigExtra_T_15, _notCDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _notCDom_reduced4SigExtra_T_21 = {1'h0, _notCDom_reduced4SigExtra_T_2[5:0] & _notCDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :107:20]
wire notCDom_reduced4SigExtra = |_notCDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:247:78, :249:11]
wire [25:0] _notCDom_sig_T = notCDom_mainSig[28:3]; // @[MulAddRecFN.scala:243:50, :251:28]
wire [2:0] _notCDom_sig_T_1 = notCDom_mainSig[2:0]; // @[MulAddRecFN.scala:243:50, :252:28]
wire _notCDom_sig_T_2 = |_notCDom_sig_T_1; // @[MulAddRecFN.scala:252:{28,35}]
wire _notCDom_sig_T_3 = _notCDom_sig_T_2 | notCDom_reduced4SigExtra; // @[MulAddRecFN.scala:249:11, :252:{35,39}]
wire [26:0] notCDom_sig = {_notCDom_sig_T, _notCDom_sig_T_3}; // @[MulAddRecFN.scala:251:{12,28}, :252:39]
wire [1:0] _notCDom_completeCancellation_T = notCDom_sig[26:25]; // @[MulAddRecFN.scala:251:12, :255:21]
wire notCDom_completeCancellation = _notCDom_completeCancellation_T == 2'h0; // @[primitives.scala:103:54]
wire _notCDom_sign_T = io_fromPreMul_signProd_0 ^ notCDom_signSigSum; // @[MulAddRecFN.scala:169:7, :232:36, :259:36]
wire notCDom_sign = ~notCDom_completeCancellation & _notCDom_sign_T; // @[MulAddRecFN.scala:255:50, :257:12, :259:36]
assign notNaN_isInfOut = notNaN_isInfProd | io_fromPreMul_isInfC_0; // @[MulAddRecFN.scala:169:7, :264:49, :265:44]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulAddRecFN.scala:169:7, :265:44]
wire notNaN_addZeros = _notNaN_addZeros_T & io_fromPreMul_isZeroC_0; // @[MulAddRecFN.scala:169:7, :267:{32,58}]
wire _io_rawOut_sign_T_4 = notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :287:26]
wire _io_invalidExc_T_3 = _io_invalidExc_T_1; // @[MulAddRecFN.scala:271:35, :272:57]
wire _io_invalidExc_T_4 = ~io_fromPreMul_isNaNAOrB_0; // @[MulAddRecFN.scala:169:7, :274:10]
wire _io_invalidExc_T_6 = _io_invalidExc_T_4 & _io_invalidExc_T_5; // @[MulAddRecFN.scala:274:{10,36}, :275:36]
wire _io_invalidExc_T_7 = _io_invalidExc_T_6 & io_fromPreMul_isInfC_0; // @[MulAddRecFN.scala:169:7, :274:36, :275:61]
wire _io_invalidExc_T_8 = _io_invalidExc_T_7 & io_fromPreMul_doSubMags_0; // @[MulAddRecFN.scala:169:7, :275:61, :276:35]
assign _io_invalidExc_T_9 = _io_invalidExc_T_3 | _io_invalidExc_T_8; // @[MulAddRecFN.scala:272:57, :273:57, :276:35]
assign io_invalidExc_0 = _io_invalidExc_T_9; // @[MulAddRecFN.scala:169:7, :273:57]
assign _io_rawOut_isNaN_T = io_fromPreMul_isNaNAOrB_0 | io_fromPreMul_isNaNC_0; // @[MulAddRecFN.scala:169:7, :278:48]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulAddRecFN.scala:169:7, :278:48]
wire _io_rawOut_isZero_T = ~io_fromPreMul_CIsDominant_0; // @[MulAddRecFN.scala:169:7, :283:14]
wire _io_rawOut_isZero_T_1 = _io_rawOut_isZero_T & notCDom_completeCancellation; // @[MulAddRecFN.scala:255:50, :283:{14,42}]
assign _io_rawOut_isZero_T_2 = notNaN_addZeros | _io_rawOut_isZero_T_1; // @[MulAddRecFN.scala:267:58, :282:25, :283:42]
assign io_rawOut_isZero_0 = _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:169:7, :282:25]
wire _io_rawOut_sign_T = notNaN_isInfProd & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :264:49, :285:27]
wire _io_rawOut_sign_T_1 = io_fromPreMul_isInfC_0 & opSignC; // @[MulAddRecFN.scala:169:7, :190:42, :286:31]
wire _io_rawOut_sign_T_2 = _io_rawOut_sign_T | _io_rawOut_sign_T_1; // @[MulAddRecFN.scala:285:{27,54}, :286:31]
wire _io_rawOut_sign_T_5 = _io_rawOut_sign_T_4 & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :287:{26,48}]
wire _io_rawOut_sign_T_6 = _io_rawOut_sign_T_5 & opSignC; // @[MulAddRecFN.scala:190:42, :287:48, :288:36]
wire _io_rawOut_sign_T_7 = _io_rawOut_sign_T_2 | _io_rawOut_sign_T_6; // @[MulAddRecFN.scala:285:54, :286:43, :288:36]
wire _io_rawOut_sign_T_11 = _io_rawOut_sign_T_7; // @[MulAddRecFN.scala:286:43, :288:48]
wire _io_rawOut_sign_T_9 = io_fromPreMul_signProd_0 | opSignC; // @[MulAddRecFN.scala:169:7, :190:42, :290:37]
wire _io_rawOut_sign_T_12 = ~notNaN_isInfOut; // @[MulAddRecFN.scala:265:44, :291:10]
wire _io_rawOut_sign_T_13 = ~notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :291:31]
wire _io_rawOut_sign_T_14 = _io_rawOut_sign_T_12 & _io_rawOut_sign_T_13; // @[MulAddRecFN.scala:291:{10,28,31}]
wire _io_rawOut_sign_T_15 = io_fromPreMul_CIsDominant_0 ? opSignC : notCDom_sign; // @[MulAddRecFN.scala:169:7, :190:42, :257:12, :292:17]
wire _io_rawOut_sign_T_16 = _io_rawOut_sign_T_14 & _io_rawOut_sign_T_15; // @[MulAddRecFN.scala:291:{28,49}, :292:17]
assign _io_rawOut_sign_T_17 = _io_rawOut_sign_T_11 | _io_rawOut_sign_T_16; // @[MulAddRecFN.scala:288:48, :290:50, :291:49]
assign io_rawOut_sign_0 = _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:169:7, :290:50]
assign _io_rawOut_sExp_T = io_fromPreMul_CIsDominant_0 ? CDom_sExp : notCDom_sExp; // @[MulAddRecFN.scala:169:7, :203:43, :241:46, :293:26]
assign io_rawOut_sExp_0 = _io_rawOut_sExp_T; // @[MulAddRecFN.scala:169:7, :293:26]
assign _io_rawOut_sig_T = io_fromPreMul_CIsDominant_0 ? CDom_sig : notCDom_sig; // @[MulAddRecFN.scala:169:7, :225:12, :251:12, :294:25]
assign io_rawOut_sig_0 = _io_rawOut_sig_T; // @[MulAddRecFN.scala:169:7, :294:25]
assign io_invalidExc = io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
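// Reading added for clarity (not in the original source): 'visible' passes only if, for every
// client c on the edge, either the transaction's source ID lies outside c.sourceId (so c cannot
// be the requester) or the address falls inside at least one of c's visibility address sets.
// As a hypothetical example, a client with sourceId = IdRange(0, 4) and
// visibility = Seq(AddressSet(0x80000000L, 0xffff)) only lets sources 0..3 pass this check
// when the address lies in 0x8000_0000 .. 0x8000_ffff.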
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
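// Note added for clarity: the watchdog above is controlled from the simulator command line,
// e.g. passing +tilelink_timeout=100000 makes the assertion fire if transactions stay in
// flight with no A- or D-channel beat accepted for 100000 consecutive cycles; the default
// of 0 leaves the timeout disabled, as the docstring says.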
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
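// Worked example added for clarity (not part of the original file): with a 3-bit A opcode,
// a_opcode_bus_size = 4 and log_a_opcode_bus_size = 2, so an in-flight Get (opcode 4) from
// source s is recorded as (4 << 1) | 1 = 9 in the 4-bit slot at offset s << 2 of
// inflight_opcodes. The lookup below masks that slot with size_to_numfullbits(1 << 2) = 0xF
// and shifts right by one, recovering opcode 4; the low set bit is what distinguishes an
// occupied slot from the all-zero "unset" encoding mentioned above.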
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
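// Note added for clarity: both Vecs are indexed by the A-channel opcode
// (PutFullData = 0, PutPartialData = 1, ArithmeticData = 2, LogicalData = 3, Get = 4,
// Hint = 5, AcquireBlock = 6, AcquirePerm = 7). A Get must therefore be answered with
// AccessAckData, while an AcquireBlock may legally complete with either Grant or GrantData
// (the "second option").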
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
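// Hypothetical usage sketch, added for illustration (in/out/meta are not from this file):
// gather every ready/valid condition once and drive each signal from "everything except itself".
//   val helper = DecoupledHelper(in.valid, out.ready, meta.ready)
//   out.valid := helper.fire(out.ready) // in.valid && meta.ready
//   meta.valid := helper.fire(meta.ready) // in.valid && out.ready
//   in.ready := helper.fire(in.valid) // out.ready && meta.ready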
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
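// Illustrative use added for clarity (tag/tagA/tagB/way/hit are hypothetical signals):
// search a small table and get a (data, valid) pair back in one expression.
//   val (way, hit) = MuxTLookup(tag, (0.U(2.W), false.B),
//     Seq(tagA -> (0.U(2.W), true.B),
//         tagB -> (1.U(2.W), true.B)))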
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
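// A small illustrative note, not part of the original file: Str packs ASCII
// characters into a UInt literal with the first character in the most
// significant byte, so Str("OK") is 0x4f4b.U(16.W); the UInt/SInt overloads
// instead build a character string of the value in the requested radix, which
// is handy for printf-based debug output.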
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01

object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
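// A minimal usage sketch, not part of the original file: with beatBytes = 4 this
// reproduces the examples in the comment above, e.g. addr_lo = 0x3 with lgSize = 0
// selects only byte lane 3, and lgSize = 2 selects all four lanes. The module
// name and port widths are illustrative assumptions.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(2.W))   // address bits within a 4-byte beat
    val lgSize  = Input(UInt(2.W))   // log2 of the access size in bytes
    val mask    = Output(UInt(4.W))  // one bit per byte lane
  })
  io.mask := MaskGen(io.addr_lo, io.lgSize, beatBytes = 4)
}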
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
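// A minimal usage sketch, not part of the original file: gate simulation-only
// debug printing on a +verbose=<n> plusarg. The module name and the "verbose"
// argument name are illustrative assumptions.
class PlusArgExample extends Module {
  val io = IO(new Bundle { val fire = Input(Bool()) })
  val verbose = PlusArg("verbose", default = 0, docstring = "nonzero enables debug printfs")
  when (io.fire && verbose =/= 0.U) {
    printf("PlusArgExample: fired\n")
  }
}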
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
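// A minimal sketch, not part of the original file, exercising two of the helpers
// above; the module name and widths are illustrative assumptions.
class UIntHelperExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val rot = Output(UInt(8.W))
    val sum = Output(UInt(3.W))
  })
  io.rot := io.in.rotateRight(3)        // Cat(in(2,0), in(7,3))
  io.sum := io.in(2, 0).addWrap(5.U, 8) // (in(2,0) + 5) % 8, both operands < 8
}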
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
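// A minimal sketch, not part of the original file: leftOR smears each set bit
// toward the MSB and rightOR toward the LSB, so for in = b00010100 the outputs
// are b11111100 and b00011111 respectively. Names and widths are assumptions.
class FillExample extends Module {
  val io = IO(new Bundle {
    val in    = Input(UInt(8.W))
    val left  = Output(UInt(8.W))
    val right = Output(UInt(8.W))
  })
  io.left  := leftOR(io.in)
  io.right := rightOR(io.in)
}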
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
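// A small illustrative note, not part of the original file: unlike Seq.groupBy,
// the resulting key order follows first appearance in the input, e.g.
//   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))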
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
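// A minimal sketch, not part of the original file: adResponse maps an A-channel
// opcode to the D-channel opcode expected in reply, e.g. Get to AccessAckData and
// PutFullData to AccessAck. The module name is an illustrative assumption.
class AdResponseExample extends Module {
  val io = IO(new Bundle {
    val aOpcode = Input(UInt(3.W))
    val dOpcode = Output(UInt(3.W))
  })
  io.dOpcode := TLMessages.adResponse(io.aOpcode)
}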
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and may hold a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
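// A minimal sketch, not part of the original file: classify a C-channel
// permissions field using the helpers above. Note that isReport also accepts the
// shrink encodings, since every code up to NtoN is a legal ProbeAck/Release param.
class PermClassifyExample extends Module {
  val io = IO(new Bundle {
    val cParam   = Input(UInt(TLPermissions.cWidth.W))
    val isShrink = Output(Bool())
    val isReport = Output(Bool())
  })
  io.isShrink := TLPermissions.isShrink(io.cParam) // TtoB, TtoN, BtoN
  io.isReport := TLPermissions.isReport(io.cParam) // also TtoT, BtoB, NtoN
}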
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
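// A minimal sketch, not part of the original file: the UInt overload of contains
// builds the prefix/range comparison described above; here it is true exactly for
// source ids 4 through 9. The module name and width are illustrative assumptions.
class IdRangeExample extends Module {
  val io = IO(new Bundle {
    val source = Input(UInt(4.W))
    val hit    = Output(Bool())
  })
  io.hit := IdRange(4, 10).contains(io.source)
}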
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
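// A minimal sketch, not part of the original file: the second example device from
// the comment above, managing 0x1000-0x100f, 0x1100-0x110f, and so on. The object
// name is an illustrative assumption.
object AddressSetExample {
  val dev = AddressSet(0x1000, 0xf0f)
  require( dev.contains(BigInt(0x1105)))             // inside the second fragment
  require(!dev.contains(BigInt(0x1010)))             // in the hole between fragments
  require( dev.widen(0xff).contains(BigInt(0x1010))) // widening ignores bits 0-7
}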
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
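  // 'atomic' is carried in a.param and selects the arithmetic operation
  // (e.g. one of the TLAtomics.{MIN, MAX, MINU, MAXU, ADD} encodings).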
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
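  // Hint's param carries the prefetch intent (TLHints.PREFETCH_READ or
  // TLHints.PREFETCH_WRITE in this codebase); the manager replies with HintAck.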
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
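  // Transpose a Seq of Seqs, tolerating ragged input: rows that run out of
  // elements simply drop out of later columns instead of being padded.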
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
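  // TLEdgeIn mirrors TLEdgeOut for the manager-facing direction: these helpers build
  // B-channel requests and D-channel responses, with legality checked against the
  // client port's declared capabilities (client.supportsProbe, etc.).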
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
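  // Illustrative response path (names are placeholders): a manager that can answer a
  // Get combinationally on its inward edge 'edgeIn' might drive channel D as:
  //   in.d.valid := in.a.valid
  //   in.d.bits  := edgeIn.AccessAck(in.a.bits, readData)
  //   in.a.ready := in.d.ready
  // This is a minimal sketch only; a real manager must also handle multi-beat and Put traffic.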
}
| module TLMonitor_13( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [5:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [5:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire [8:0] _c_opcodes_set_T = 9'h0; // @[Monitor.scala:767:79]
wire [8:0] _c_sizes_set_T = 9'h0; // @[Monitor.scala:768:77]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_43 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_49 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_first_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_first_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_first_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_first_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_set_wo_ready_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_set_wo_ready_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_set_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_set_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_opcodes_set_interm_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_opcodes_set_interm_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_sizes_set_interm_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_sizes_set_interm_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_opcodes_set_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_opcodes_set_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_sizes_set_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_sizes_set_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_probe_ack_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_probe_ack_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _c_probe_ack_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _c_probe_ack_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _same_cycle_resp_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _same_cycle_resp_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _same_cycle_resp_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _same_cycle_resp_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _same_cycle_resp_WIRE_4_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _same_cycle_resp_WIRE_5_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [515:0] _c_sizes_set_T_1 = 516'h0; // @[Monitor.scala:768:52]
wire [514:0] _c_opcodes_set_T_1 = 515'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [63:0] _c_set_wo_ready_T = 64'h1; // @[OneHot.scala:58:35]
wire [63:0] _c_set_T = 64'h1; // @[OneHot.scala:58:35]
wire [279:0] c_sizes_set = 280'h0; // @[Monitor.scala:741:34]
wire [139:0] c_opcodes_set = 140'h0; // @[Monitor.scala:740:34]
wire [34:0] c_set = 35'h0; // @[Monitor.scala:738:34]
wire [34:0] c_set_wo_ready = 35'h0; // @[Monitor.scala:739:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [5:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 6'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_1 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_7 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_13 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_19 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 6'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 6'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 6'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_29 = _source_ok_T_28 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_33 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {17'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_34 = io_in_d_bits_source_0 == 6'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_35 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_41 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_47 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_53 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_36 = _source_ok_T_35 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_42 = _source_ok_T_41 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_44 = _source_ok_T_42; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_46; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_48 = _source_ok_T_47 == 4'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_50 = _source_ok_T_48; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_52; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_54 = _source_ok_T_53 == 4'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_58; // @[Parameters.scala:1138:31]
wire _source_ok_T_59 = io_in_d_bits_source_0 == 6'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_59; // @[Parameters.scala:1138:31]
wire _source_ok_T_60 = io_in_d_bits_source_0 == 6'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire _source_ok_T_61 = io_in_d_bits_source_0 == 6'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire _source_ok_T_62 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_63 = _source_ok_T_62 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_64 = _source_ok_T_63 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_65 = _source_ok_T_64 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_67 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1524 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1524; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1524; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [5:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
wire _T_1597 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1597; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1597; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1597; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [5:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [34:0] inflight; // @[Monitor.scala:614:27]
reg [139:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [279:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [34:0] a_set; // @[Monitor.scala:626:34]
wire [34:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [139:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [279:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [8:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [8:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [8:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [8:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [8:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [139:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [139:0] _a_opcode_lookup_T_6 = {136'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [139:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[139:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [8:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [8:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [8:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [8:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [8:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [279:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [279:0] _a_size_lookup_T_6 = {272'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [279:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[279:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [63:0] _GEN_3 = 64'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [63:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35]
wire [63:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[34:0] : 35'h0; // @[OneHot.scala:58:35]
wire _T_1450 = _T_1524 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1450 ? _a_set_T[34:0] : 35'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1450 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1450 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [8:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [514:0] _a_opcodes_set_T_1 = {511'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1450 ? _a_opcodes_set_T_1[139:0] : 140'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [8:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [515:0] _a_sizes_set_T_1 = {511'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1450 ? _a_sizes_set_T_1[279:0] : 280'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [34:0] d_clr; // @[Monitor.scala:664:34]
wire [34:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [139:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [279:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1496 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [63:0] _GEN_5 = 64'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [63:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [63:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [63:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [63:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1496 & ~d_release_ack ? _d_clr_wo_ready_T[34:0] : 35'h0; // @[OneHot.scala:58:35]
wire _T_1465 = _T_1597 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1465 ? _d_clr_T[34:0] : 35'h0; // @[OneHot.scala:58:35]
wire [526:0] _d_opcodes_clr_T_5 = 527'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1465 ? _d_opcodes_clr_T_5[139:0] : 140'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [526:0] _d_sizes_clr_T_5 = 527'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1465 ? _d_sizes_clr_T_5[279:0] : 280'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [34:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [34:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [34:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [139:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [139:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [139:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [279:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [279:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [279:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [34:0] inflight_1; // @[Monitor.scala:726:35]
wire [34:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [139:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [139:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [279:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [279:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [139:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [139:0] _c_opcode_lookup_T_6 = {136'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [139:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[139:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [279:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [279:0] _c_size_lookup_T_6 = {272'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [279:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[279:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [34:0] d_clr_1; // @[Monitor.scala:774:34]
wire [34:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [139:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [279:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1568 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1568 & d_release_ack_1 ? _d_clr_wo_ready_T_1[34:0] : 35'h0; // @[OneHot.scala:58:35]
wire _T_1550 = _T_1597 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1550 ? _d_clr_T_1[34:0] : 35'h0; // @[OneHot.scala:58:35]
wire [526:0] _d_opcodes_clr_T_11 = 527'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1550 ? _d_opcodes_clr_T_11[139:0] : 140'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [526:0] _d_sizes_clr_T_11 = 527'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1550 ? _d_sizes_clr_T_11[279:0] : 280'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 6'h0; // @[Monitor.scala:36:7, :795:113]
wire [34:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [34:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [139:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [139:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [279:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [279:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
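    // For example (hypothetical numbers): a traversable VC with bufferSize = 4 is given
    // 5 slots of the shared memory here, because with only head/tail pointers a completely
    // full region would make head == tail, which is indistinguishable from the empty case.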
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
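  // Note: the mask gives rotating (round-robin) priority. After a grant to VC w, mask covers
  // bits [w:0], so the low half of the Cat (vals & ~mask) prefers the lowest-index VC above w;
  // only when no such request remains does the high half (all requesters) win the encoder.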
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_19( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [1:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_3, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_3, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [36:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [1:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [1:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [36:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [3:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [3:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_3; // @[InputUnit.scala:266:32]
wire vcalloc_vals_2; // @[InputUnit.scala:266:32]
wire vcalloc_vals_1; // @[InputUnit.scala:266:32]
wire vcalloc_vals_0; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_0_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_3_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [3:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_3_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [1:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [36:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [36:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [36:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [36:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_0_g; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_0_0; // @[InputUnit.scala:192:19]
reg states_0_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_0_flow_ingress_node; // @[InputUnit.scala:192:19]
reg states_0_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_0_flow_egress_node; // @[InputUnit.scala:192:19]
reg states_0_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_1_g; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_0_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_0_1; // @[InputUnit.scala:192:19]
reg states_1_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19]
reg states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19]
reg states_1_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_2_g; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_0_2; // @[InputUnit.scala:192:19]
reg states_2_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19]
reg states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19]
reg states_2_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_3_g; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_0_2; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_0_3; // @[InputUnit.scala:192:19]
reg states_3_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_3_flow_ingress_node; // @[InputUnit.scala:192:19]
reg states_3_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_3_flow_egress_node; // @[InputUnit.scala:192:19]
reg states_3_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_0_valid = states_0_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
wire route_arbiter_io_in_3_valid = states_3_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22]
reg [3:0] mask; // @[InputUnit.scala:250:21]
wire [3:0] _vcalloc_filter_T_3 = {vcalloc_vals_3, vcalloc_vals_2, vcalloc_vals_1, vcalloc_vals_0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [7:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 8'h1 : _vcalloc_filter_T_3[1] ? 8'h2 : _vcalloc_filter_T_3[2] ? 8'h4 : _vcalloc_filter_T_3[3] ? 8'h8 : vcalloc_vals_0 ? 8'h10 : vcalloc_vals_1 ? 8'h20 : vcalloc_vals_2 ? 8'h40 : {vcalloc_vals_3, 7'h0}; // @[OneHot.scala:85:71]
wire [3:0] vcalloc_sel = vcalloc_filter[3:0] | vcalloc_filter[7:4]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_0 | vcalloc_vals_1 | vcalloc_vals_2 | vcalloc_vals_3; // @[package.scala:81:59]
assign vcalloc_vals_0 = states_0_g == 3'h2; // @[InputUnit.scala:158:7, :192:19, :266:32]
assign vcalloc_vals_1 = states_1_g == 3'h2; // @[InputUnit.scala:158:7, :192:19, :266:32]
assign vcalloc_vals_2 = states_2_g == 3'h2; // @[InputUnit.scala:158:7, :192:19, :266:32]
assign vcalloc_vals_3 = states_3_g == 3'h2; // @[InputUnit.scala:158:7, :192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[0]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[1]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[2]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[3]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit object MyTypeArithmetic extends Arithmetic[MyType] { ... }
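//
// A minimal sketch (not part of the original file; "MyType" and its operations are hypothetical)
// of what such an instance could look like, mirroring the SIntArithmetic object further below:
//
//   implicit object MyTypeArithmetic extends Arithmetic[MyType] {
//     override implicit def cast(self: MyType) = new ArithmeticOps(self) {
//       override def +(t: MyType) = ...   // likewise for *, mac, -, >>, >, withWidthOf,
//       ...                               // clippedToWidthOf, relu, zero, identity, minimum
//     }
//   }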
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
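  // e.g. expWidth = 8 gives bias = 127 (as in IEEE 754 binary32); expWidth = 5 gives bias = 15 (binary16)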
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
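        // Worked example (hypothetical inputs): self = 6 = 0b0110, u = 2 gives point_five = self(1) = 1,
        // zeros = ((6 & 1) =/= 0) = 0, ones_digit = self(2) = 1, so r = 1 and the result is (6 >> 2) + 1 = 2
        // (6/4 = 1.5 rounds to the even value 2). For self = 10 = 0b1010, u = 2, r = 1 & (0 | 0) = 0,
        // so the result is 10 >> 2 = 2 (10/4 = 2.5 also resolves to the even value 2).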
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating-point square-root unit, but we should use an integer one instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
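// The fused multiply-adder computes t_rec_resized * 1 + self_rec, i.e. the sum of the two operands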
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
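// Negate t by flipping its sign bit, then reuse the addition operator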
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
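// Standard-format encoding of 2^(-u): sign = 0, exponent field = bias - u, significand = 0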
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
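// 1.0 in standard format: sign = 0, exponent = bias (0b0111...1), significand = 0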
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
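// Negative infinity in standard format: sign = 1, exponent = all ones, significand = 0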
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_453( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
 end // always @(posedge)
MacUnit_197 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IterativeTrapCheck.scala:
package saturn.frontend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.diplomacy._
import saturn.common._
class IndexMaskAccess(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val in = Input(Bool())
val inst = Input(new VectorIssueInst)
val index_access = Flipped(new VectorIndexAccessIO)
val mask_access = Flipped(new VectorMaskAccessIO)
val access = new Bundle {
val ready = Output(Bool())
val eidx = Input(UInt(log2Ceil(maxVLMax).W))
val index = Output(UInt(64.W))
val mask = Output(Bool())
}
val pop = Input(Valid(UInt(log2Ceil(maxVLMax).W)))
val flush = Input(Bool())
})
val valid = RegInit(false.B)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
// This all works only with pow2 buffers and eidx starting at 0
val valids = Reg(Vec(4, Bool()))
val indices = Reg(Vec(4, UInt(64.W)))
val masks = Reg(Vec(4, Bool()))
when (io.in) {
assert(!valid)
valid := true.B
eidx := 0.U
valids.foreach(_ := false.B)
}
val needs_index = io.inst.mop.isOneOf(mopOrdered, mopUnordered)
val needs_mask = !io.inst.vm
val index_ready = io.index_access.ready || !needs_index
val mask_ready = io.mask_access.ready || !needs_mask
io.index_access.valid := valid && needs_index && !valids(eidx(1,0))
io.mask_access.valid := valid && needs_mask && !valids(eidx(1,0))
io.index_access.vrs := io.inst.rs2
io.index_access.eidx := eidx
io.index_access.eew := io.inst.mem_idx_size
io.mask_access.eidx := eidx
when (valid && index_ready && mask_ready && !valids(eidx(1,0))) {
val next_eidx = eidx +& 1.U
eidx := eidx + 1.U
when (next_eidx === io.inst.vconfig.vl) {
valid := false.B
}
valids(eidx(1,0)) := true.B
indices(eidx(1,0)) := io.index_access.idx
masks(eidx(1,0)) := io.mask_access.mask
}
io.access.ready := valids(io.access.eidx(1,0))
io.access.index := indices(io.access.eidx(1,0))
io.access.mask := masks(io.access.eidx(1,0))
when (io.pop.fire) {
valids(io.pop.bits(1,0)) := false.B
}
when (io.flush) {
valid := false.B
}
}
class IterativeTrapCheck(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val status = Input(new MStatus)
val in = Input(Valid(new VectorIssueInst))
val busy = Output(Bool())
val s0_tlb_req = Valid(new TLBReq(3))
val s1_tlb_req = Valid(new TLBReq(3))
val tlb_resp = Input(new TLBResp)
val retire = Output(Bool())
val pc = Output(UInt(vaddrBitsExtended.W))
val vstart = Valid(UInt(log2Ceil(maxVLMax).W))
val vconfig = Valid(new VConfig)
val xcpt = Valid(new Bundle {
val cause = UInt(xLen.W)
val tval = UInt(coreMaxAddrBits.W)
})
val inst = Output(new VectorIssueInst)
val issue = Decoupled(new VectorIssueInst)
val index_access = Flipped(new VectorIndexAccessIO)
val mask_access = Flipped(new VectorMaskAccessIO)
})
val replay_kill = WireInit(false.B)
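// Round an address up to the base of the following page: add one page, then clear the page-offset bits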
def nextPage(addr: UInt) = ((addr + (1 << pgIdxBits).U) >> pgIdxBits) << pgIdxBits
val valid = RegInit(false.B)
val seg_hi = Reg(Bool())
val inst = Reg(new VectorIssueInst)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
val addr = Reg(UInt(vaddrBitsExtended.W))
val tlb_backoff = RegInit(0.U(2.W))
when (tlb_backoff =/= 0.U) { tlb_backoff := tlb_backoff - 1.U }
val im_access = Module(new IndexMaskAccess)
im_access.io.in := io.in.valid
im_access.io.inst := inst
im_access.io.index_access <> io.index_access
im_access.io.mask_access <> io.mask_access
when (io.in.valid) {
assert(!valid)
valid := true.B
seg_hi := false.B
inst := io.in.bits
eidx := 0.U
addr := io.in.bits.rs1_data
}
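// Bytes advanced per element: unit-stride accesses step by the full segment ((seg_nf + 1) fields of 2^mem_elem_size bytes each), strided accesses step by rs2, and indexed accesses leave this at 0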
val stride = MuxLookup(inst.mop, 0.U)(Seq(
(mopUnit -> ((inst.seg_nf +& 1.U) << inst.mem_elem_size)),
(mopStrided -> inst.rs2_data)
))
val indexed = inst.mop.isOneOf(mopOrdered, mopUnordered)
val index_ready = !indexed || im_access.io.access.ready
val mask_ready = inst.vm || im_access.io.access.ready
val index = Mux(indexed, im_access.io.access.index & eewBitMask(inst.mem_idx_size), 0.U)
val base = Mux(indexed, inst.rs1_data, addr)
val indexaddr = base + index
val tlb_addr = Mux(seg_hi, nextPage(indexaddr), indexaddr)
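// Number of segment fields that fit between the current access address and the end of its page; if all seg_nf + 1 fields fit, the segment does not cross a page boundary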
val seg_nf_consumed = ((1 << pgIdxBits).U - Mux(seg_hi, indexaddr, tlb_addr)(pgIdxBits-1,0)) >> inst.mem_elem_size
val seg_single_page = seg_nf_consumed >= (inst.seg_nf +& 1.U)
val masked = !im_access.io.access.mask && !inst.vm
val tlb_valid = eidx < inst.vconfig.vl && eidx >= inst.vstart && !masked
val ff = inst.umop === lumopFF && inst.mop === mopUnit
io.busy := valid
io.inst := inst
im_access.io.access.eidx := eidx
io.s0_tlb_req.valid := tlb_valid && tlb_backoff === 0.U && index_ready && mask_ready
io.s0_tlb_req.bits.vaddr := tlb_addr
io.s0_tlb_req.bits.passthrough := false.B
io.s0_tlb_req.bits.size := inst.mem_elem_size
io.s0_tlb_req.bits.cmd := Mux(inst.opcode(5), M_XWR, M_XRD)
io.s0_tlb_req.bits.prv := io.status.prv
io.s0_tlb_req.bits.v := io.status.v
io.s1_tlb_req.valid := RegEnable(io.s0_tlb_req.valid, false.B, valid)
io.s1_tlb_req.bits := RegEnable(io.s0_tlb_req.bits, valid)
val replay_fire = valid && eidx < inst.vconfig.vl && tlb_backoff === 0.U && index_ready && mask_ready
when (replay_fire) {
when (seg_hi || seg_single_page || inst.seg_nf === 0.U) {
eidx := eidx + 1.U
addr := addr + stride
seg_hi := false.B
} .otherwise {
seg_hi := true.B
}
}
val s1_valid = RegNext(replay_fire && !replay_kill, false.B)
val s1_eidx = RegEnable(eidx, valid)
val s1_masked = RegEnable(masked, valid)
val s1_seg_hi = RegEnable(seg_hi, valid)
val s1_base = RegEnable(base, valid)
val s1_tlb_valid = RegEnable(tlb_valid, valid)
val s1_tlb_addr = RegEnable(tlb_addr, valid)
val s1_seg_nf_consumed = RegEnable(seg_nf_consumed, valid)
val s1_seg_single_page = RegEnable(seg_single_page, valid)
when (io.tlb_resp.miss && s1_valid && tlb_backoff === 0.U) { tlb_backoff := 3.U }
val tlb_resp = WireInit(io.tlb_resp)
when (!s1_tlb_valid) {
tlb_resp.miss := false.B
}
val xcpts = Seq(
(tlb_resp.pf.st, Causes.store_page_fault.U),
(tlb_resp.pf.ld, Causes.load_page_fault.U),
(tlb_resp.gf.st, Causes.store_guest_page_fault.U),
(tlb_resp.gf.ld, Causes.load_guest_page_fault.U),
(tlb_resp.ae.st, Causes.store_access.U),
(tlb_resp.ae.ld, Causes.load_access.U),
(tlb_resp.ma.st, Causes.misaligned_store.U),
(tlb_resp.ma.ld, Causes.misaligned_load.U)
)
val xcpt = xcpts.map(_._1).orR && s1_eidx >= inst.vstart && !s1_masked
val cause = PriorityMux(xcpts)
io.issue.valid := false.B
io.issue.bits := inst
io.issue.bits.vstart := s1_eidx
io.issue.bits.vconfig.vl := s1_eidx +& 1.U
io.issue.bits.segend := inst.seg_nf
io.issue.bits.segstart := 0.U
io.issue.bits.page := tlb_resp.paddr >> pgIdxBits
io.xcpt.valid := false.B
io.pc := inst.pc
io.xcpt.bits.cause := cause
io.xcpt.bits.tval := s1_tlb_addr
io.vstart.valid := false.B
io.vstart.bits := s1_eidx
io.retire := false.B
io.vconfig.valid := false.B
io.vconfig.bits := inst.vconfig
io.vconfig.bits.vl := s1_eidx
im_access.io.pop.valid := false.B
im_access.io.pop.bits := s1_eidx
im_access.io.flush := false.B
when (s1_valid) {
io.issue.valid := !tlb_resp.miss && !xcpt && s1_eidx >= inst.vstart && !s1_masked
when (inst.seg_nf =/= 0.U && !s1_seg_single_page) {
when (!s1_seg_hi) {
io.issue.bits.segend := s1_seg_nf_consumed - 1.U
} .otherwise {
io.issue.bits.segstart := s1_seg_nf_consumed
}
}
when (s1_seg_hi || s1_seg_single_page || inst.seg_nf === 0.U) {
im_access.io.pop.valid := true.B
}
when (tlb_resp.miss || !io.issue.ready) {
tlb_backoff := 3.U
replay_kill := true.B
eidx := s1_eidx
addr := s1_base
seg_hi := s1_seg_hi
im_access.io.pop.valid := false.B
} .elsewhen (xcpt) {
val ff_nofault = ff && s1_eidx =/= 0.U
valid := false.B
replay_kill := true.B
io.retire := ff_nofault
io.xcpt.valid := !ff_nofault
io.vstart.valid := !ff_nofault
io.vconfig.valid := ff_nofault
im_access.io.flush := true.B
} .elsewhen ((s1_eidx +& 1.U) === inst.vconfig.vl && (s1_seg_hi || s1_seg_single_page || inst.seg_nf === 0.U)) {
valid := false.B
replay_kill := true.B
io.retire := true.B
io.vstart.valid := true.B
io.vstart.bits := 0.U
im_access.io.flush := true.B
}
}
}
| module IterativeTrapCheck( // @[IterativeTrapCheck.scala:83:7]
input clock, // @[IterativeTrapCheck.scala:83:7]
input reset, // @[IterativeTrapCheck.scala:83:7]
input [1:0] io_status_prv, // @[IterativeTrapCheck.scala:84:14]
input io_in_valid, // @[IterativeTrapCheck.scala:84:14]
input [39:0] io_in_bits_pc, // @[IterativeTrapCheck.scala:84:14]
input [31:0] io_in_bits_bits, // @[IterativeTrapCheck.scala:84:14]
input [6:0] io_in_bits_vconfig_vl, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vill, // @[IterativeTrapCheck.scala:84:14]
input [54:0] io_in_bits_vconfig_vtype_reserved, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vma, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vta, // @[IterativeTrapCheck.scala:84:14]
input [2:0] io_in_bits_vconfig_vtype_vsew, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vlmul_sign, // @[IterativeTrapCheck.scala:84:14]
input [1:0] io_in_bits_vconfig_vtype_vlmul_mag, // @[IterativeTrapCheck.scala:84:14]
input [5:0] io_in_bits_vstart, // @[IterativeTrapCheck.scala:84:14]
input [63:0] io_in_bits_rs1_data, // @[IterativeTrapCheck.scala:84:14]
input [63:0] io_in_bits_rs2_data, // @[IterativeTrapCheck.scala:84:14]
input [2:0] io_in_bits_rm, // @[IterativeTrapCheck.scala:84:14]
input [1:0] io_in_bits_emul, // @[IterativeTrapCheck.scala:84:14]
input [1:0] io_in_bits_mop, // @[IterativeTrapCheck.scala:84:14]
output io_busy, // @[IterativeTrapCheck.scala:84:14]
output io_s0_tlb_req_valid, // @[IterativeTrapCheck.scala:84:14]
output [39:0] io_s0_tlb_req_bits_vaddr, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_s0_tlb_req_bits_size, // @[IterativeTrapCheck.scala:84:14]
output [4:0] io_s0_tlb_req_bits_cmd, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_s0_tlb_req_bits_prv, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_miss, // @[IterativeTrapCheck.scala:84:14]
input [31:0] io_tlb_resp_paddr, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_pf_ld, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_pf_st, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ae_ld, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ae_st, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ma_ld, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ma_st, // @[IterativeTrapCheck.scala:84:14]
output io_retire, // @[IterativeTrapCheck.scala:84:14]
output [39:0] io_pc, // @[IterativeTrapCheck.scala:84:14]
output io_vstart_valid, // @[IterativeTrapCheck.scala:84:14]
output [5:0] io_vstart_bits, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_valid, // @[IterativeTrapCheck.scala:84:14]
output [6:0] io_vconfig_bits_vl, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vill, // @[IterativeTrapCheck.scala:84:14]
output [54:0] io_vconfig_bits_vtype_reserved, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vma, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vta, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_vconfig_bits_vtype_vsew, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vlmul_sign, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_vconfig_bits_vtype_vlmul_mag, // @[IterativeTrapCheck.scala:84:14]
output io_xcpt_valid, // @[IterativeTrapCheck.scala:84:14]
output [63:0] io_xcpt_bits_cause, // @[IterativeTrapCheck.scala:84:14]
output [39:0] io_xcpt_bits_tval, // @[IterativeTrapCheck.scala:84:14]
output [31:0] io_inst_bits, // @[IterativeTrapCheck.scala:84:14]
input io_issue_ready, // @[IterativeTrapCheck.scala:84:14]
output io_issue_valid, // @[IterativeTrapCheck.scala:84:14]
output [31:0] io_issue_bits_bits, // @[IterativeTrapCheck.scala:84:14]
output [6:0] io_issue_bits_vconfig_vl, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_vconfig_vtype_vsew, // @[IterativeTrapCheck.scala:84:14]
output io_issue_bits_vconfig_vtype_vlmul_sign, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_issue_bits_vconfig_vtype_vlmul_mag, // @[IterativeTrapCheck.scala:84:14]
output [5:0] io_issue_bits_vstart, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_segstart, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_segend, // @[IterativeTrapCheck.scala:84:14]
output [63:0] io_issue_bits_rs1_data, // @[IterativeTrapCheck.scala:84:14]
output [63:0] io_issue_bits_rs2_data, // @[IterativeTrapCheck.scala:84:14]
output [19:0] io_issue_bits_page, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_rm, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_issue_bits_emul, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_issue_bits_mop, // @[IterativeTrapCheck.scala:84:14]
input io_index_access_ready, // @[IterativeTrapCheck.scala:84:14]
output io_index_access_valid, // @[IterativeTrapCheck.scala:84:14]
output [4:0] io_index_access_vrs, // @[IterativeTrapCheck.scala:84:14]
output [6:0] io_index_access_eidx, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_index_access_eew, // @[IterativeTrapCheck.scala:84:14]
input [63:0] io_index_access_idx, // @[IterativeTrapCheck.scala:84:14]
input io_mask_access_ready, // @[IterativeTrapCheck.scala:84:14]
output io_mask_access_valid, // @[IterativeTrapCheck.scala:84:14]
output [6:0] io_mask_access_eidx, // @[IterativeTrapCheck.scala:84:14]
input io_mask_access_mask // @[IterativeTrapCheck.scala:84:14]
);
wire _im_access_io_access_ready; // @[IterativeTrapCheck.scala:118:25]
wire [63:0] _im_access_io_access_index; // @[IterativeTrapCheck.scala:118:25]
wire _im_access_io_access_mask; // @[IterativeTrapCheck.scala:118:25]
reg valid; // @[IterativeTrapCheck.scala:110:23]
reg seg_hi; // @[IterativeTrapCheck.scala:111:19]
reg [39:0] inst_pc; // @[IterativeTrapCheck.scala:112:19]
reg [31:0] inst_bits; // @[IterativeTrapCheck.scala:112:19]
reg [6:0] inst_vconfig_vl; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vill; // @[IterativeTrapCheck.scala:112:19]
reg [54:0] inst_vconfig_vtype_reserved; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vma; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vta; // @[IterativeTrapCheck.scala:112:19]
reg [2:0] inst_vconfig_vtype_vsew; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vlmul_sign; // @[IterativeTrapCheck.scala:112:19]
reg [1:0] inst_vconfig_vtype_vlmul_mag; // @[IterativeTrapCheck.scala:112:19]
reg [5:0] inst_vstart; // @[IterativeTrapCheck.scala:112:19]
reg [63:0] inst_rs1_data; // @[IterativeTrapCheck.scala:112:19]
reg [63:0] inst_rs2_data; // @[IterativeTrapCheck.scala:112:19]
reg [2:0] inst_rm; // @[IterativeTrapCheck.scala:112:19]
reg [1:0] inst_emul; // @[IterativeTrapCheck.scala:112:19]
reg [1:0] inst_mop; // @[IterativeTrapCheck.scala:112:19]
reg [5:0] eidx; // @[IterativeTrapCheck.scala:113:19]
reg [39:0] addr; // @[IterativeTrapCheck.scala:114:19]
reg [1:0] tlb_backoff; // @[IterativeTrapCheck.scala:115:28] |
Generate the Verilog code corresponding to the following Chisel files.
File TLInterface.scala:
package saturn.mem
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.diplomacy._
import saturn.common._
class TLInterface(tagBits: Int)(implicit p: Parameters) extends LazyModule()(p) with HasCoreParameters {
val node = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} Vector Load",
sourceId = IdRange(0, 1 << tagBits)
)))))
override lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, edge) = node.out(0)
val widthBytes = edge.slave.beatBytes
val offBits = log2Ceil(widthBytes)
val io = IO(new Bundle {
val busy = Output(Bool())
val req = Flipped(Decoupled(new MemRequest(widthBytes, tagBits)))
val resp = Valid(new MemResponse(widthBytes, tagBits))
})
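// Count of outstanding requests: incremented when a request fires on channel A, decremented when a response fires on channel D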
val inflights = RegInit(0.U(tagBits.W))
when (out.a.fire || out.d.fire) {
inflights := inflights + out.a.fire - out.d.fire
}
io.busy := inflights =/= 0.U
io.req.ready := out.a.ready
out.a.valid := io.req.valid
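// Addresses are aligned down to the beat boundary (low offBits bits cleared); a full beat is always transferred, with byte lanes selected by the mask for stores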
out.a.bits := Mux(io.req.bits.store,
edge.Put(
io.req.bits.tag,
(io.req.bits.addr >> offBits) << offBits,
log2Ceil(widthBytes).U,
io.req.bits.data,
io.req.bits.mask)._2,
edge.Get(
io.req.bits.tag,
(io.req.bits.addr >> offBits) << offBits,
log2Ceil(widthBytes).U)._2
)
out.d.ready := true.B
io.resp.valid := out.d.valid
io.resp.bits.data := out.d.bits.data
io.resp.bits.tag := out.d.bits.source
}
}
class TLSplitInterface(implicit p: Parameters) extends LazyModule()(p) with HasCoreParameters with HasVectorParams {
val reader = LazyModule(new TLInterface(dmemTagBits))
val writer = LazyModule(new TLInterface(dmemTagBits))
val arb = LazyModule(new TLXbar)
def node = TLWidthWidget(dLenB) := arb.node
def edge = arb.node.edges.out(0)
arb.node := reader.node
arb.node := writer.node
override lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val vec = Flipped(new VectorMemIO)
val mem_busy = Output(Bool())
})
reader.module.io.req <> io.vec.load_req
io.vec.load_resp <> reader.module.io.resp
writer.module.io.req <> io.vec.store_req
io.vec.store_ack <> writer.module.io.resp
io.mem_busy := reader.module.io.busy || writer.module.io.busy
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module TLSplitInterface( // @[TLInterface.scala:75:9]
input clock, // @[TLInterface.scala:75:9]
input reset, // @[TLInterface.scala:75:9]
input auto_arb_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_arb_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_arb_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_arb_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_arb_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_arb_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_arb_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_arb_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_arb_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_arb_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_arb_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_arb_anon_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_arb_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_arb_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_arb_anon_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_arb_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_arb_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_arb_anon_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output io_vec_load_req_ready, // @[TLInterface.scala:76:16]
input io_vec_load_req_valid, // @[TLInterface.scala:76:16]
input [39:0] io_vec_load_req_bits_addr, // @[TLInterface.scala:76:16]
input [3:0] io_vec_load_req_bits_tag, // @[TLInterface.scala:76:16]
output io_vec_load_resp_valid, // @[TLInterface.scala:76:16]
output [63:0] io_vec_load_resp_bits_data, // @[TLInterface.scala:76:16]
output [3:0] io_vec_load_resp_bits_tag, // @[TLInterface.scala:76:16]
output io_vec_store_req_ready, // @[TLInterface.scala:76:16]
input io_vec_store_req_valid, // @[TLInterface.scala:76:16]
input [39:0] io_vec_store_req_bits_addr, // @[TLInterface.scala:76:16]
input [63:0] io_vec_store_req_bits_data, // @[TLInterface.scala:76:16]
input [7:0] io_vec_store_req_bits_mask, // @[TLInterface.scala:76:16]
input [3:0] io_vec_store_req_bits_tag, // @[TLInterface.scala:76:16]
output io_vec_store_ack_valid, // @[TLInterface.scala:76:16]
output [3:0] io_vec_store_ack_bits_tag, // @[TLInterface.scala:76:16]
output io_mem_busy // @[TLInterface.scala:76:16]
);
wire _arb_auto_anon_in_1_a_ready; // @[TLInterface.scala:67:23]
wire _arb_auto_anon_in_1_d_valid; // @[TLInterface.scala:67:23]
wire [3:0] _arb_auto_anon_in_1_d_bits_source; // @[TLInterface.scala:67:23]
wire [63:0] _arb_auto_anon_in_1_d_bits_data; // @[TLInterface.scala:67:23]
wire _arb_auto_anon_in_0_a_ready; // @[TLInterface.scala:67:23]
wire _arb_auto_anon_in_0_d_valid; // @[TLInterface.scala:67:23]
wire [3:0] _arb_auto_anon_in_0_d_bits_source; // @[TLInterface.scala:67:23]
wire [63:0] _arb_auto_anon_in_0_d_bits_data; // @[TLInterface.scala:67:23]
wire _writer_auto_out_a_valid; // @[TLInterface.scala:65:26]
wire [2:0] _writer_auto_out_a_bits_opcode; // @[TLInterface.scala:65:26]
wire [3:0] _writer_auto_out_a_bits_source; // @[TLInterface.scala:65:26]
wire [31:0] _writer_auto_out_a_bits_address; // @[TLInterface.scala:65:26]
wire [7:0] _writer_auto_out_a_bits_mask; // @[TLInterface.scala:65:26]
wire [63:0] _writer_auto_out_a_bits_data; // @[TLInterface.scala:65:26]
wire _writer_io_busy; // @[TLInterface.scala:65:26]
wire _reader_auto_out_a_valid; // @[TLInterface.scala:64:26]
wire [2:0] _reader_auto_out_a_bits_opcode; // @[TLInterface.scala:64:26]
wire [3:0] _reader_auto_out_a_bits_source; // @[TLInterface.scala:64:26]
wire [31:0] _reader_auto_out_a_bits_address; // @[TLInterface.scala:64:26]
wire [7:0] _reader_auto_out_a_bits_mask; // @[TLInterface.scala:64:26]
wire [63:0] _reader_auto_out_a_bits_data; // @[TLInterface.scala:64:26]
wire _reader_io_busy; // @[TLInterface.scala:64:26]
TLInterface reader ( // @[TLInterface.scala:64:26]
.clock (clock),
.reset (reset),
.auto_out_a_ready (_arb_auto_anon_in_0_a_ready), // @[TLInterface.scala:67:23]
.auto_out_a_valid (_reader_auto_out_a_valid),
.auto_out_a_bits_opcode (_reader_auto_out_a_bits_opcode),
.auto_out_a_bits_source (_reader_auto_out_a_bits_source),
.auto_out_a_bits_address (_reader_auto_out_a_bits_address),
.auto_out_a_bits_mask (_reader_auto_out_a_bits_mask),
.auto_out_a_bits_data (_reader_auto_out_a_bits_data),
.auto_out_d_valid (_arb_auto_anon_in_0_d_valid), // @[TLInterface.scala:67:23]
.auto_out_d_bits_source (_arb_auto_anon_in_0_d_bits_source), // @[TLInterface.scala:67:23]
.auto_out_d_bits_data (_arb_auto_anon_in_0_d_bits_data), // @[TLInterface.scala:67:23]
.io_busy (_reader_io_busy),
.io_req_ready (io_vec_load_req_ready),
.io_req_valid (io_vec_load_req_valid),
.io_req_bits_addr (io_vec_load_req_bits_addr),
.io_req_bits_data (64'h0), // @[TLInterface.scala:64:26, :76:16]
.io_req_bits_mask (8'hFF), // @[TLInterface.scala:64:26, :76:16]
.io_req_bits_tag (io_vec_load_req_bits_tag),
.io_req_bits_store (1'h0), // @[TLInterface.scala:64:26, :65:26, :67:23, :75:9, :76:16]
.io_resp_valid (io_vec_load_resp_valid),
.io_resp_bits_data (io_vec_load_resp_bits_data),
.io_resp_bits_tag (io_vec_load_resp_bits_tag)
); // @[TLInterface.scala:64:26]
TLInterface writer ( // @[TLInterface.scala:65:26]
.clock (clock),
.reset (reset),
.auto_out_a_ready (_arb_auto_anon_in_1_a_ready), // @[TLInterface.scala:67:23]
.auto_out_a_valid (_writer_auto_out_a_valid),
.auto_out_a_bits_opcode (_writer_auto_out_a_bits_opcode),
.auto_out_a_bits_source (_writer_auto_out_a_bits_source),
.auto_out_a_bits_address (_writer_auto_out_a_bits_address),
.auto_out_a_bits_mask (_writer_auto_out_a_bits_mask),
.auto_out_a_bits_data (_writer_auto_out_a_bits_data),
.auto_out_d_valid (_arb_auto_anon_in_1_d_valid), // @[TLInterface.scala:67:23]
.auto_out_d_bits_source (_arb_auto_anon_in_1_d_bits_source), // @[TLInterface.scala:67:23]
.auto_out_d_bits_data (_arb_auto_anon_in_1_d_bits_data), // @[TLInterface.scala:67:23]
.io_busy (_writer_io_busy),
.io_req_ready (io_vec_store_req_ready),
.io_req_valid (io_vec_store_req_valid),
.io_req_bits_addr (io_vec_store_req_bits_addr),
.io_req_bits_data (io_vec_store_req_bits_data),
.io_req_bits_mask (io_vec_store_req_bits_mask),
.io_req_bits_tag (io_vec_store_req_bits_tag),
.io_req_bits_store (1'h1), // @[TLInterface.scala:65:26, :76:16]
.io_resp_valid (io_vec_store_ack_valid),
.io_resp_bits_data (/* unused */),
.io_resp_bits_tag (io_vec_store_ack_bits_tag)
); // @[TLInterface.scala:65:26]
TLXbar_i2_o1_a32d64s5k3z4u arb ( // @[TLInterface.scala:67:23]
.clock (clock),
.reset (reset),
.auto_anon_in_1_a_ready (_arb_auto_anon_in_1_a_ready),
.auto_anon_in_1_a_valid (_writer_auto_out_a_valid), // @[TLInterface.scala:65:26]
.auto_anon_in_1_a_bits_opcode (_writer_auto_out_a_bits_opcode), // @[TLInterface.scala:65:26]
.auto_anon_in_1_a_bits_source (_writer_auto_out_a_bits_source), // @[TLInterface.scala:65:26]
.auto_anon_in_1_a_bits_address (_writer_auto_out_a_bits_address), // @[TLInterface.scala:65:26]
.auto_anon_in_1_a_bits_mask (_writer_auto_out_a_bits_mask), // @[TLInterface.scala:65:26]
.auto_anon_in_1_a_bits_data (_writer_auto_out_a_bits_data), // @[TLInterface.scala:65:26]
.auto_anon_in_1_d_valid (_arb_auto_anon_in_1_d_valid),
.auto_anon_in_1_d_bits_source (_arb_auto_anon_in_1_d_bits_source),
.auto_anon_in_1_d_bits_data (_arb_auto_anon_in_1_d_bits_data),
.auto_anon_in_0_a_ready (_arb_auto_anon_in_0_a_ready),
.auto_anon_in_0_a_valid (_reader_auto_out_a_valid), // @[TLInterface.scala:64:26]
.auto_anon_in_0_a_bits_opcode (_reader_auto_out_a_bits_opcode), // @[TLInterface.scala:64:26]
.auto_anon_in_0_a_bits_source (_reader_auto_out_a_bits_source), // @[TLInterface.scala:64:26]
.auto_anon_in_0_a_bits_address (_reader_auto_out_a_bits_address), // @[TLInterface.scala:64:26]
.auto_anon_in_0_a_bits_mask (_reader_auto_out_a_bits_mask), // @[TLInterface.scala:64:26]
.auto_anon_in_0_a_bits_data (_reader_auto_out_a_bits_data), // @[TLInterface.scala:64:26]
.auto_anon_in_0_d_valid (_arb_auto_anon_in_0_d_valid),
.auto_anon_in_0_d_bits_source (_arb_auto_anon_in_0_d_bits_source),
.auto_anon_in_0_d_bits_data (_arb_auto_anon_in_0_d_bits_data),
.auto_anon_out_a_ready (auto_arb_anon_out_a_ready),
.auto_anon_out_a_valid (auto_arb_anon_out_a_valid),
.auto_anon_out_a_bits_opcode (auto_arb_anon_out_a_bits_opcode),
.auto_anon_out_a_bits_size (auto_arb_anon_out_a_bits_size),
.auto_anon_out_a_bits_source (auto_arb_anon_out_a_bits_source),
.auto_anon_out_a_bits_address (auto_arb_anon_out_a_bits_address),
.auto_anon_out_a_bits_mask (auto_arb_anon_out_a_bits_mask),
.auto_anon_out_a_bits_data (auto_arb_anon_out_a_bits_data),
.auto_anon_out_d_ready (auto_arb_anon_out_d_ready),
.auto_anon_out_d_valid (auto_arb_anon_out_d_valid),
.auto_anon_out_d_bits_opcode (auto_arb_anon_out_d_bits_opcode),
.auto_anon_out_d_bits_param (auto_arb_anon_out_d_bits_param),
.auto_anon_out_d_bits_size (auto_arb_anon_out_d_bits_size),
.auto_anon_out_d_bits_source (auto_arb_anon_out_d_bits_source),
.auto_anon_out_d_bits_sink (auto_arb_anon_out_d_bits_sink),
.auto_anon_out_d_bits_denied (auto_arb_anon_out_d_bits_denied),
.auto_anon_out_d_bits_data (auto_arb_anon_out_d_bits_data),
.auto_anon_out_d_bits_corrupt (auto_arb_anon_out_d_bits_corrupt)
); // @[TLInterface.scala:67:23]
assign io_mem_busy = _reader_io_busy | _writer_io_busy; // @[TLInterface.scala:64:26, :65:26, :75:9, :85:42]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength bits into compressedLength bits.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
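// Illustrative usage sketch, not part of the original BOOM source: XOR-folding a 64-bit
// value into 16 bits XORs the four 16-bit chunks together, e.g. for 0xDEAD_BEEF_0123_4567
// the result is 0xDEAD ^ 0xBEEF ^ 0x0123 ^ 0x4567. The object name below is hypothetical.
object FoldUsageSketch
{
  // Fold a wide history register down to a 16-bit index.
  def fold64To16(history: UInt): UInt = Fold(history, 16, 64)
}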
/**
 * Object to check if a MicroOp was killed due to a branch mispredict.
 * Uses the "fast" branch masks.
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
 * Object to return a new MicroOp with an updated branch mask, given the
 * original MicroOp and the branch resolution info.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
 * Object to return an updated BR mask, given the branch resolution info and a MicroOp (or a raw BR mask).
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
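// Illustrative usage sketch, not part of the original BOOM source: how the branch-mask
// helpers compose. If a uop carries br_mask = 0b0110 and the update resolves branch tag 1
// (resolve_mask = 0b0010) without a mispredict, GetNewBrMask yields 0b0100 and
// IsKilledByBranch stays false; if instead mispredict_mask = 0b0010, the uop is killed.
// The object name below is hypothetical.
object BranchMaskUsageSketch
{
  def killedAndNewMask(brupdate: BrUpdateInfo, uop: MicroOp): (Bool, UInt) =
    (IsKilledByBranch(brupdate, false.B, uop), GetNewBrMask(brupdate, uop))
}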
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
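// Illustrative usage sketch, not part of the original BOOM source: the wrap helpers above
// implement modulo-n pointer arithmetic. For n = 3 (not a power of two), WrapInc(2.U, 3)
// gives 0, WrapDec(0.U, 3) gives 2, and WrapAdd(2.U, 2.U, 3) gives 1 (2 + 2 = 4, 4 - 3 = 1).
// The object name below is hypothetical.
object WrapUsageSketch
{
  // Advance and retreat a pointer of a hypothetical 3-entry queue.
  def bumpPointers(ptr: UInt): (UInt, UInt) = (WrapInc(ptr, 3), WrapDec(ptr, 3))
}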
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert first because pc is wider than log2(b): masking pc directly with the
    // narrow ~(b-1).U constant would clear all pc bits above size(b).
~(~pc | (b-1).U)
}
}
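// Illustrative usage sketch, not part of the original BOOM source: aligning to a 64-byte
// boundary clears the low six bits while keeping the upper PC bits, e.g. a 40-bit PC of
// 0x12345678 becomes 0x12345640. The object name below is hypothetical.
object AlignPCUsageSketch
{
  def alignToCacheLine(pc: UInt): UInt = AlignPCToBoundary(pc, 64)
}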
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
 * Object to return the position of the lowest set bit at or after 'head'
 * (wrapping to the lowest set bit below the head if none is set at or after it).
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
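// Illustrative usage sketch, not part of the original BOOM source: with requests
// Seq(r0, r1, r2, r3) and head = 2, the encoder returns the index of the first set bit
// among r2, r3 and only wraps to r0, r1 if neither is set; e.g. requests (1, 0, 1, 0)
// with head = 3 return index 0. The object name below is hypothetical.
object AgePriorityUsageSketch
{
  def pickOldest(requests: Seq[Bool], head: UInt): UInt = AgePriorityEncoder(requests, head)
}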
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
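// Illustrative usage sketch, not part of the original BOOM source: IsOlder answers "was i0
// allocated before i1?" for a circular buffer whose oldest entry sits at head. With head = 6
// in an 8-entry structure, IsOlder(7.U, 1.U, 6.U) is true: index 7 lies between the head and
// the wrap point, while index 1 has already wrapped. The object name below is hypothetical.
object IsOlderUsageSketch
{
  def firstIsOlder(i0: UInt, i1: UInt, head: UInt): Bool = IsOlder(i0, i1, head)
}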
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
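// Illustrative usage sketch, not part of the original BOOM source: for an 8-bit input
// 0b00010100 (bits 2 and 4 set), MaskLower gives 0b00011111 and MaskUpper gives 0b11111100.
// The object name below is hypothetical.
object MaskUsageSketch
{
  def bracket(in: UInt): (UInt, UInt) = (MaskLower(in), MaskUpper(in))
}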
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
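// Illustrative usage sketch, not part of the original BOOM source: for in = 0b01101010 and
// n = 2, sels(0) = 0b00000010 (the lowest set bit) and sels(1) = 0b00001000 (the next one
// up). The object name below is hypothetical.
object SelectFirstNUsageSketch
{
  def firstTwo(in: UInt): Vec[UInt] = SelectFirstN(in, 2)
}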
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
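// Illustrative usage sketch, not part of the original BOOM source: funneling four optional
// 8-bit producers down to two consumers. The module and port names below are hypothetical.
class CompactorUsageSketch extends Module
{
  val io = IO(new Bundle {
    val in  = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(4, 2, UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}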
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
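// Illustrative usage sketch, not part of the original BOOM source: the printf helpers are
// intended for debug printfs, e.g. rendering a valid bit as 'V' or '-'. The object name
// below is hypothetical.
object BoolToCharUsageSketch
{
  def printValid(valid: Bool): Unit = printf("valid:%c\n", BoolToChar(valid, 'V'))
}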
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v4.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v4.common._
import boom.v4.util._
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val grant = Input(Bool())
val iss_uop = Output(new MicroOp())
val in_uop = Input(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val squash_grant = Input(Bool())
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new Wakeup)))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val child_rebusys = Input(UInt(aluWidth.W))
}
class IssueSlot(val numWakeupPorts: Int, val isMem: Boolean, val isFp: Boolean)(implicit p: Parameters)
extends BoomModule
{
val io = IO(new IssueSlotIO(numWakeupPorts))
val slot_valid = RegInit(false.B)
val slot_uop = Reg(new MicroOp())
val next_valid = WireInit(slot_valid)
val next_uop = WireInit(UpdateBrMask(io.brupdate, slot_uop))
val killed = IsKilledByBranch(io.brupdate, io.kill, slot_uop)
io.valid := slot_valid
io.out_uop := next_uop
io.will_be_valid := next_valid && !killed
when (io.kill) {
slot_valid := false.B
} .elsewhen (io.in_uop.valid) {
slot_valid := true.B
} .elsewhen (io.clear) {
slot_valid := false.B
} .otherwise {
slot_valid := next_valid && !killed
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (!slot_valid || io.clear || io.kill)
} .otherwise {
slot_uop := next_uop
}
// Wakeups
next_uop.iw_p1_bypass_hint := false.B
next_uop.iw_p2_bypass_hint := false.B
next_uop.iw_p3_bypass_hint := false.B
next_uop.iw_p1_speculative_child := 0.U
next_uop.iw_p2_speculative_child := 0.U
val rebusied_prs1 = WireInit(false.B)
val rebusied_prs2 = WireInit(false.B)
val rebusied = rebusied_prs1 || rebusied_prs2
val prs1_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs1 }
val prs2_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs2 }
val prs3_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs3 }
val prs1_wakeups = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.valid && m }
val prs2_wakeups = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.valid && m }
val prs3_wakeups = (io.wakeup_ports zip prs3_matches).map { case (w,m) => w.valid && m }
val prs1_rebusys = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.bits.rebusy && m }
val prs2_rebusys = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.bits.rebusy && m }
val bypassables = io.wakeup_ports.map { w => w.bits.bypassable }
val speculative_masks = io.wakeup_ports.map { w => w.bits.speculative_mask }
when (prs1_wakeups.reduce(_||_)) {
next_uop.prs1_busy := false.B
next_uop.iw_p1_speculative_child := Mux1H(prs1_wakeups, speculative_masks)
next_uop.iw_p1_bypass_hint := Mux1H(prs1_wakeups, bypassables)
}
when ((prs1_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p1_speculative_child) =/= 0.U)) &&
slot_uop.lrs1_rtype === RT_FIX) {
next_uop.prs1_busy := true.B
rebusied_prs1 := true.B
}
when (prs2_wakeups.reduce(_||_)) {
next_uop.prs2_busy := false.B
next_uop.iw_p2_speculative_child := Mux1H(prs2_wakeups, speculative_masks)
next_uop.iw_p2_bypass_hint := Mux1H(prs2_wakeups, bypassables)
}
when ((prs2_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p2_speculative_child) =/= 0.U)) &&
slot_uop.lrs2_rtype === RT_FIX) {
next_uop.prs2_busy := true.B
rebusied_prs2 := true.B
}
when (prs3_wakeups.reduce(_||_)) {
next_uop.prs3_busy := false.B
next_uop.iw_p3_bypass_hint := Mux1H(prs3_wakeups, bypassables)
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === slot_uop.ppred) {
next_uop.ppred_busy := false.B
}
val iss_ready = !slot_uop.prs1_busy && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && !(slot_uop.prs3_busy && isFp.B)
val agen_ready = (slot_uop.fu_code(FC_AGEN) && !slot_uop.prs1_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
val dgen_ready = (slot_uop.fu_code(FC_DGEN) && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
io.request := slot_valid && !slot_uop.iw_issued && (
iss_ready || agen_ready || dgen_ready
)
io.iss_uop := slot_uop
// Update state for current micro-op based on grant
next_uop.iw_issued := false.B
next_uop.iw_issued_partial_agen := false.B
next_uop.iw_issued_partial_dgen := false.B
when (io.grant && !io.squash_grant) {
next_uop.iw_issued := true.B
}
if (isMem) {
when (slot_uop.fu_code(FC_AGEN) && slot_uop.fu_code(FC_DGEN)) {
when (agen_ready) {
// Issue the AGEN, next slot entry is a DGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_agen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := true.B
io.iss_uop.fu_code(FC_DGEN) := false.B
} .otherwise {
// Issue the DGEN, next slot entry is the AGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_dgen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := false.B
io.iss_uop.fu_code(FC_DGEN) := true.B
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
} .elsewhen (slot_uop.fu_code(FC_DGEN)) {
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
io.iss_uop.lrs2_rtype := RT_X
io.iss_uop.prs2 := io.iss_uop.prs1 // helps with DCE
}
when (slot_valid && slot_uop.iw_issued) {
next_valid := rebusied
if (isMem) {
when (slot_uop.iw_issued_partial_agen) {
next_valid := true.B
when (!rebusied_prs1) {
next_uop.fu_code(FC_AGEN) := false.B
next_uop.fu_code(FC_DGEN) := true.B
}
} .elsewhen (slot_uop.iw_issued_partial_dgen) {
next_valid := true.B
when (!rebusied_prs2) {
next_uop.fu_code(FC_AGEN) := true.B
next_uop.fu_code(FC_DGEN) := false.B
}
}
}
}
}
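// Illustrative usage sketch, not part of the original BOOM source: a thin wrapper that
// instantiates a single non-memory, non-FP slot and passes its interface straight through.
// The class name below is hypothetical; a real issue queue instantiates one slot per entry
// and arbitrates over their request/grant signals.
class IssueSlotUsageSketch(numWakeupPorts: Int)(implicit p: Parameters) extends BoomModule
{
  val io = IO(new IssueSlotIO(numWakeupPorts))
  val slot = Module(new IssueSlot(numWakeupPorts, isMem = false, isFp = false))
  slot.io <> io
}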
| module IssueSlot_33( // @[issue-slot.scala:49:7]
input clock, // @[issue-slot.scala:49:7]
input reset, // @[issue-slot.scala:49:7]
output io_valid, // @[issue-slot.scala:52:14]
output io_will_be_valid, // @[issue-slot.scala:52:14]
output io_request, // @[issue-slot.scala:52:14]
input io_grant, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_iss_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [15:0] io_iss_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_type, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_amo, // @[issue-slot.scala:52:14]
output io_iss_uop_is_eret, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_iss_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_iss_uop_taken, // @[issue-slot.scala:52:14]
output io_iss_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_iss_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_rob_idx, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ppred, // @[issue-slot.scala:52:14]
output io_iss_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_iss_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_iss_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_mem_size, // @[issue-slot.scala:52:14]
output io_iss_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_iss_uop_is_unique, // @[issue-slot.scala:52:14]
output io_iss_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_iss_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output io_iss_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_iss_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_in_uop_valid, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_4, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_5, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_6, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_7, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_8, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_9, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_type, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_eret, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rocc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:52:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:52:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:52:14]
input io_in_uop_bits_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_op2_sel, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:52:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:52:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:52:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_csr_cmd, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_fcn_op, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_typ, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_type, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_out_uop_is_fence, // @[issue-slot.scala:52:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_out_uop_is_amo, // @[issue-slot.scala:52:14]
output io_out_uop_is_eret, // @[issue-slot.scala:52:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_out_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_out_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_out_uop_taken, // @[issue-slot.scala:52:14]
output io_out_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:52:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_out_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:52:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_out_uop_is_unique, // @[issue-slot.scala:52:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_out_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_out_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_eret, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:52:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:52:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:52:14]
input io_kill, // @[issue-slot.scala:52:14]
input io_clear, // @[issue-slot.scala:52:14]
input io_squash_grant, // @[issue-slot.scala:52:14]
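  // Wakeup broadcast ports 0-4 follow: each carries a valid bit plus the full micro-op payload;
  // port 0 additionally carries bypassable, speculative_mask, and rebusy fields.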
input io_wakeup_ports_0_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_0_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_0_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_0_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_0_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_bypassable, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_speculative_mask, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_rebusy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_1_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_1_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_1_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_1_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_2_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_2_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_2_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_2_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_3_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_3_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_3_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_3_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_4_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_4_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_4_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_4_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_4_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_4_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_4_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_4_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input [2:0] io_child_rebusys // @[issue-slot.scala:52:14]
);
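  // Note: the wires below are internal aliases (suffix _0) of the module inputs,
  // emitted by the Chisel/FIRRTL flow to carry the port values into the slot logic.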
wire [15:0] next_uop_out_br_mask; // @[util.scala:104:23]
wire io_grant_0 = io_grant; // @[issue-slot.scala:49:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_0_0 = io_in_uop_bits_iq_type_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_1_0 = io_in_uop_bits_iq_type_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_2_0 = io_in_uop_bits_iq_type_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_3_0 = io_in_uop_bits_iq_type_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_0_0 = io_in_uop_bits_fu_code_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_1_0 = io_in_uop_bits_fu_code_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_2_0 = io_in_uop_bits_fu_code_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_3_0 = io_in_uop_bits_fu_code_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_4_0 = io_in_uop_bits_fu_code_4; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_5_0 = io_in_uop_bits_fu_code_5; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_6_0 = io_in_uop_bits_fu_code_6; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_7_0 = io_in_uop_bits_fu_code_7; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_8_0 = io_in_uop_bits_fu_code_8; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_9_0 = io_in_uop_bits_fu_code_9; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_0 = io_in_uop_bits_iw_issued; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_agen_0 = io_in_uop_bits_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_dgen_0 = io_in_uop_bits_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_iw_p1_speculative_child_0 = io_in_uop_bits_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_iw_p2_speculative_child_0 = io_in_uop_bits_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p1_bypass_hint_0 = io_in_uop_bits_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p2_bypass_hint_0 = io_in_uop_bits_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p3_bypass_hint_0 = io_in_uop_bits_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_dis_col_sel_0 = io_in_uop_bits_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_type_0 = io_in_uop_bits_br_type; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfence_0 = io_in_uop_bits_is_sfence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_eret_0 = io_in_uop_bits_is_eret; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rocc_0 = io_in_uop_bits_is_rocc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_mov_0 = io_in_uop_bits_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_imm_rename_0 = io_in_uop_bits_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_imm_sel_0 = io_in_uop_bits_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_pimm_0 = io_in_uop_bits_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_op1_sel_0 = io_in_uop_bits_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_op2_sel_0 = io_in_uop_bits_op2_sel; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ldst_0 = io_in_uop_bits_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wen_0 = io_in_uop_bits_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren1_0 = io_in_uop_bits_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren2_0 = io_in_uop_bits_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren3_0 = io_in_uop_bits_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap12_0 = io_in_uop_bits_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap23_0 = io_in_uop_bits_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagIn_0 = io_in_uop_bits_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagOut_0 = io_in_uop_bits_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fromint_0 = io_in_uop_bits_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_toint_0 = io_in_uop_bits_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fastpipe_0 = io_in_uop_bits_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fma_0 = io_in_uop_bits_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_div_0 = io_in_uop_bits_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_sqrt_0 = io_in_uop_bits_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wflags_0 = io_in_uop_bits_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_vec_0 = io_in_uop_bits_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_csr_cmd_0 = io_in_uop_bits_csr_cmd; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fcn_dw_0 = io_in_uop_bits_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_fcn_op_0 = io_in_uop_bits_fcn_op; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_fp_rm_0 = io_in_uop_bits_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_typ_0 = io_in_uop_bits_fp_typ; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:49:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:49:7]
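  // Local pass-through wires for slot control inputs (kill, clear, squash_grant)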
wire io_kill_0 = io_kill; // @[issue-slot.scala:49:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:49:7]
wire io_squash_grant_0 = io_squash_grant; // @[issue-slot.scala:49:7]
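  // Local pass-through wires for wakeup port 0 inputs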
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_inst_0 = io_wakeup_ports_0_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_debug_inst_0 = io_wakeup_ports_0_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rvc_0 = io_wakeup_ports_0_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_0_bits_uop_debug_pc_0 = io_wakeup_ports_0_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_0_0 = io_wakeup_ports_0_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_1_0 = io_wakeup_ports_0_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_2_0 = io_wakeup_ports_0_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_3_0 = io_wakeup_ports_0_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_0_0 = io_wakeup_ports_0_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_1_0 = io_wakeup_ports_0_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_2_0 = io_wakeup_ports_0_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_3_0 = io_wakeup_ports_0_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_4_0 = io_wakeup_ports_0_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_5_0 = io_wakeup_ports_0_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_6_0 = io_wakeup_ports_0_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_7_0 = io_wakeup_ports_0_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_8_0 = io_wakeup_ports_0_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_9_0 = io_wakeup_ports_0_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_0 = io_wakeup_ports_0_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_dis_col_sel_0 = io_wakeup_ports_0_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_0_bits_uop_br_mask_0 = io_wakeup_ports_0_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_tag_0 = io_wakeup_ports_0_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_type_0 = io_wakeup_ports_0_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfb_0 = io_wakeup_ports_0_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fence_0 = io_wakeup_ports_0_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fencei_0 = io_wakeup_ports_0_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfence_0 = io_wakeup_ports_0_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_amo_0 = io_wakeup_ports_0_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_eret_0 = io_wakeup_ports_0_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_0_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rocc_0 = io_wakeup_ports_0_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_mov_0 = io_wakeup_ports_0_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ftq_idx_0 = io_wakeup_ports_0_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_edge_inst_0 = io_wakeup_ports_0_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_pc_lob_0 = io_wakeup_ports_0_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_taken_0 = io_wakeup_ports_0_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_imm_rename_0 = io_wakeup_ports_0_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_imm_sel_0 = io_wakeup_ports_0_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_pimm_0 = io_wakeup_ports_0_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_0_bits_uop_imm_packed_0 = io_wakeup_ports_0_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_op1_sel_0 = io_wakeup_ports_0_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_op2_sel_0 = io_wakeup_ports_0_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_rob_idx_0 = io_wakeup_ports_0_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ldq_idx_0 = io_wakeup_ports_0_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_stq_idx_0 = io_wakeup_ports_0_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_rxq_idx_0 = io_wakeup_ports_0_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_pdst_0 = io_wakeup_ports_0_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs1_0 = io_wakeup_ports_0_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs2_0 = io_wakeup_ports_0_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs3_0 = io_wakeup_ports_0_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ppred_0 = io_wakeup_ports_0_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs1_busy_0 = io_wakeup_ports_0_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs2_busy_0 = io_wakeup_ports_0_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs3_busy_0 = io_wakeup_ports_0_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ppred_busy_0 = io_wakeup_ports_0_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_stale_pdst_0 = io_wakeup_ports_0_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_exception_0 = io_wakeup_ports_0_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_0_bits_uop_exc_cause_0 = io_wakeup_ports_0_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_mem_cmd_0 = io_wakeup_ports_0_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_mem_size_0 = io_wakeup_ports_0_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_mem_signed_0 = io_wakeup_ports_0_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_ldq_0 = io_wakeup_ports_0_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_stq_0 = io_wakeup_ports_0_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_unique_0 = io_wakeup_ports_0_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_flush_on_commit_0 = io_wakeup_ports_0_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_csr_cmd_0 = io_wakeup_ports_0_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_0_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_ldst_0 = io_wakeup_ports_0_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs1_0 = io_wakeup_ports_0_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs2_0 = io_wakeup_ports_0_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs3_0 = io_wakeup_ports_0_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dst_rtype_0 = io_wakeup_ports_0_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype_0 = io_wakeup_ports_0_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype_0 = io_wakeup_ports_0_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_frs3_en_0 = io_wakeup_ports_0_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fcn_dw_0 = io_wakeup_ports_0_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_fcn_op_0 = io_wakeup_ports_0_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_val_0 = io_wakeup_ports_0_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_fp_rm_0 = io_wakeup_ports_0_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_typ_0 = io_wakeup_ports_0_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_0_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_debug_if_0 = io_wakeup_ports_0_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_0_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc_0 = io_wakeup_ports_0_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc_0 = io_wakeup_ports_0_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_bypassable_0 = io_wakeup_ports_0_bits_bypassable; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_speculative_mask_0 = io_wakeup_ports_0_bits_speculative_mask; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_rebusy_0 = io_wakeup_ports_0_bits_rebusy; // @[issue-slot.scala:49:7]
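  // Local pass-through wires for wakeup port 1 inputs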
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_inst_0 = io_wakeup_ports_1_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_debug_inst_0 = io_wakeup_ports_1_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rvc_0 = io_wakeup_ports_1_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_1_bits_uop_debug_pc_0 = io_wakeup_ports_1_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_0_0 = io_wakeup_ports_1_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_1_0 = io_wakeup_ports_1_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_2_0 = io_wakeup_ports_1_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_3_0 = io_wakeup_ports_1_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_0_0 = io_wakeup_ports_1_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_1_0 = io_wakeup_ports_1_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_2_0 = io_wakeup_ports_1_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_3_0 = io_wakeup_ports_1_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_4_0 = io_wakeup_ports_1_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_5_0 = io_wakeup_ports_1_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_6_0 = io_wakeup_ports_1_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_7_0 = io_wakeup_ports_1_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_8_0 = io_wakeup_ports_1_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_9_0 = io_wakeup_ports_1_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_0 = io_wakeup_ports_1_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_dis_col_sel_0 = io_wakeup_ports_1_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_1_bits_uop_br_mask_0 = io_wakeup_ports_1_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_tag_0 = io_wakeup_ports_1_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_type_0 = io_wakeup_ports_1_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfb_0 = io_wakeup_ports_1_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fence_0 = io_wakeup_ports_1_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fencei_0 = io_wakeup_ports_1_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfence_0 = io_wakeup_ports_1_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_amo_0 = io_wakeup_ports_1_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_eret_0 = io_wakeup_ports_1_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_1_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rocc_0 = io_wakeup_ports_1_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_mov_0 = io_wakeup_ports_1_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ftq_idx_0 = io_wakeup_ports_1_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_edge_inst_0 = io_wakeup_ports_1_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_pc_lob_0 = io_wakeup_ports_1_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_taken_0 = io_wakeup_ports_1_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_imm_rename_0 = io_wakeup_ports_1_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_imm_sel_0 = io_wakeup_ports_1_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_pimm_0 = io_wakeup_ports_1_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_1_bits_uop_imm_packed_0 = io_wakeup_ports_1_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_op1_sel_0 = io_wakeup_ports_1_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_op2_sel_0 = io_wakeup_ports_1_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_rob_idx_0 = io_wakeup_ports_1_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ldq_idx_0 = io_wakeup_ports_1_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_stq_idx_0 = io_wakeup_ports_1_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_rxq_idx_0 = io_wakeup_ports_1_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_pdst_0 = io_wakeup_ports_1_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs1_0 = io_wakeup_ports_1_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs2_0 = io_wakeup_ports_1_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs3_0 = io_wakeup_ports_1_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ppred_0 = io_wakeup_ports_1_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs1_busy_0 = io_wakeup_ports_1_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs2_busy_0 = io_wakeup_ports_1_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs3_busy_0 = io_wakeup_ports_1_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ppred_busy_0 = io_wakeup_ports_1_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_stale_pdst_0 = io_wakeup_ports_1_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_exception_0 = io_wakeup_ports_1_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_1_bits_uop_exc_cause_0 = io_wakeup_ports_1_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_mem_cmd_0 = io_wakeup_ports_1_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_mem_size_0 = io_wakeup_ports_1_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_mem_signed_0 = io_wakeup_ports_1_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_ldq_0 = io_wakeup_ports_1_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_stq_0 = io_wakeup_ports_1_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_unique_0 = io_wakeup_ports_1_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_flush_on_commit_0 = io_wakeup_ports_1_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_csr_cmd_0 = io_wakeup_ports_1_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_1_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_ldst_0 = io_wakeup_ports_1_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs1_0 = io_wakeup_ports_1_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs2_0 = io_wakeup_ports_1_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs3_0 = io_wakeup_ports_1_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dst_rtype_0 = io_wakeup_ports_1_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype_0 = io_wakeup_ports_1_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype_0 = io_wakeup_ports_1_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_frs3_en_0 = io_wakeup_ports_1_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fcn_dw_0 = io_wakeup_ports_1_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_fcn_op_0 = io_wakeup_ports_1_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_val_0 = io_wakeup_ports_1_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_fp_rm_0 = io_wakeup_ports_1_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_typ_0 = io_wakeup_ports_1_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_1_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_debug_if_0 = io_wakeup_ports_1_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_1_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc_0 = io_wakeup_ports_1_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc_0 = io_wakeup_ports_1_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
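  // Local pass-through wires for wakeup port 2 inputs (note: unlike ports 0-1, this port carries no iw_issued_partial_* fields)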
wire io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_inst_0 = io_wakeup_ports_2_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_debug_inst_0 = io_wakeup_ports_2_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rvc_0 = io_wakeup_ports_2_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_2_bits_uop_debug_pc_0 = io_wakeup_ports_2_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_0_0 = io_wakeup_ports_2_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_1_0 = io_wakeup_ports_2_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_2_0 = io_wakeup_ports_2_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_3_0 = io_wakeup_ports_2_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_0_0 = io_wakeup_ports_2_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_1_0 = io_wakeup_ports_2_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_2_0 = io_wakeup_ports_2_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_3_0 = io_wakeup_ports_2_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_4_0 = io_wakeup_ports_2_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_5_0 = io_wakeup_ports_2_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_6_0 = io_wakeup_ports_2_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_7_0 = io_wakeup_ports_2_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_8_0 = io_wakeup_ports_2_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_9_0 = io_wakeup_ports_2_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_0 = io_wakeup_ports_2_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_dis_col_sel_0 = io_wakeup_ports_2_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_2_bits_uop_br_mask_0 = io_wakeup_ports_2_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_tag_0 = io_wakeup_ports_2_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_type_0 = io_wakeup_ports_2_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfb_0 = io_wakeup_ports_2_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fence_0 = io_wakeup_ports_2_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fencei_0 = io_wakeup_ports_2_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfence_0 = io_wakeup_ports_2_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_amo_0 = io_wakeup_ports_2_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_eret_0 = io_wakeup_ports_2_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_2_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rocc_0 = io_wakeup_ports_2_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_mov_0 = io_wakeup_ports_2_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ftq_idx_0 = io_wakeup_ports_2_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_edge_inst_0 = io_wakeup_ports_2_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_pc_lob_0 = io_wakeup_ports_2_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_taken_0 = io_wakeup_ports_2_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_imm_rename_0 = io_wakeup_ports_2_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_imm_sel_0 = io_wakeup_ports_2_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_pimm_0 = io_wakeup_ports_2_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_2_bits_uop_imm_packed_0 = io_wakeup_ports_2_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_op1_sel_0 = io_wakeup_ports_2_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_op2_sel_0 = io_wakeup_ports_2_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_rob_idx_0 = io_wakeup_ports_2_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ldq_idx_0 = io_wakeup_ports_2_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_stq_idx_0 = io_wakeup_ports_2_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_rxq_idx_0 = io_wakeup_ports_2_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_pdst_0 = io_wakeup_ports_2_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs1_0 = io_wakeup_ports_2_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs2_0 = io_wakeup_ports_2_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs3_0 = io_wakeup_ports_2_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ppred_0 = io_wakeup_ports_2_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs1_busy_0 = io_wakeup_ports_2_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs2_busy_0 = io_wakeup_ports_2_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs3_busy_0 = io_wakeup_ports_2_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ppred_busy_0 = io_wakeup_ports_2_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_stale_pdst_0 = io_wakeup_ports_2_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_exception_0 = io_wakeup_ports_2_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_2_bits_uop_exc_cause_0 = io_wakeup_ports_2_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_mem_cmd_0 = io_wakeup_ports_2_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_mem_size_0 = io_wakeup_ports_2_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_mem_signed_0 = io_wakeup_ports_2_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_ldq_0 = io_wakeup_ports_2_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_stq_0 = io_wakeup_ports_2_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_unique_0 = io_wakeup_ports_2_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_flush_on_commit_0 = io_wakeup_ports_2_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_csr_cmd_0 = io_wakeup_ports_2_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_2_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_ldst_0 = io_wakeup_ports_2_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs1_0 = io_wakeup_ports_2_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs2_0 = io_wakeup_ports_2_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs3_0 = io_wakeup_ports_2_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_dst_rtype_0 = io_wakeup_ports_2_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype_0 = io_wakeup_ports_2_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype_0 = io_wakeup_ports_2_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_frs3_en_0 = io_wakeup_ports_2_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fcn_dw_0 = io_wakeup_ports_2_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_fcn_op_0 = io_wakeup_ports_2_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_val_0 = io_wakeup_ports_2_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_fp_rm_0 = io_wakeup_ports_2_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_typ_0 = io_wakeup_ports_2_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_2_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_debug_if_0 = io_wakeup_ports_2_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_2_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc_0 = io_wakeup_ports_2_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc_0 = io_wakeup_ports_2_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
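  // Local pass-through wires for wakeup port 3 inputs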
wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_inst_0 = io_wakeup_ports_3_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_debug_inst_0 = io_wakeup_ports_3_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rvc_0 = io_wakeup_ports_3_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_3_bits_uop_debug_pc_0 = io_wakeup_ports_3_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_0_0 = io_wakeup_ports_3_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_1_0 = io_wakeup_ports_3_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_2_0 = io_wakeup_ports_3_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_3_0 = io_wakeup_ports_3_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_0_0 = io_wakeup_ports_3_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_1_0 = io_wakeup_ports_3_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_2_0 = io_wakeup_ports_3_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_3_0 = io_wakeup_ports_3_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_4_0 = io_wakeup_ports_3_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_5_0 = io_wakeup_ports_3_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_6_0 = io_wakeup_ports_3_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_7_0 = io_wakeup_ports_3_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_8_0 = io_wakeup_ports_3_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_9_0 = io_wakeup_ports_3_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_0 = io_wakeup_ports_3_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_dis_col_sel_0 = io_wakeup_ports_3_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_3_bits_uop_br_mask_0 = io_wakeup_ports_3_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_tag_0 = io_wakeup_ports_3_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_type_0 = io_wakeup_ports_3_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfb_0 = io_wakeup_ports_3_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fence_0 = io_wakeup_ports_3_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fencei_0 = io_wakeup_ports_3_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfence_0 = io_wakeup_ports_3_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_amo_0 = io_wakeup_ports_3_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_eret_0 = io_wakeup_ports_3_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_3_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rocc_0 = io_wakeup_ports_3_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_mov_0 = io_wakeup_ports_3_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ftq_idx_0 = io_wakeup_ports_3_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_edge_inst_0 = io_wakeup_ports_3_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_pc_lob_0 = io_wakeup_ports_3_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_taken_0 = io_wakeup_ports_3_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_imm_rename_0 = io_wakeup_ports_3_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_imm_sel_0 = io_wakeup_ports_3_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_pimm_0 = io_wakeup_ports_3_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_3_bits_uop_imm_packed_0 = io_wakeup_ports_3_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_op1_sel_0 = io_wakeup_ports_3_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_op2_sel_0 = io_wakeup_ports_3_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_rob_idx_0 = io_wakeup_ports_3_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ldq_idx_0 = io_wakeup_ports_3_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_stq_idx_0 = io_wakeup_ports_3_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_rxq_idx_0 = io_wakeup_ports_3_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_pdst_0 = io_wakeup_ports_3_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs1_0 = io_wakeup_ports_3_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs2_0 = io_wakeup_ports_3_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs3_0 = io_wakeup_ports_3_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ppred_0 = io_wakeup_ports_3_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs1_busy_0 = io_wakeup_ports_3_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs2_busy_0 = io_wakeup_ports_3_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs3_busy_0 = io_wakeup_ports_3_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ppred_busy_0 = io_wakeup_ports_3_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_stale_pdst_0 = io_wakeup_ports_3_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_exception_0 = io_wakeup_ports_3_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_3_bits_uop_exc_cause_0 = io_wakeup_ports_3_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_mem_cmd_0 = io_wakeup_ports_3_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_mem_size_0 = io_wakeup_ports_3_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_mem_signed_0 = io_wakeup_ports_3_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_ldq_0 = io_wakeup_ports_3_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_stq_0 = io_wakeup_ports_3_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_unique_0 = io_wakeup_ports_3_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_flush_on_commit_0 = io_wakeup_ports_3_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_csr_cmd_0 = io_wakeup_ports_3_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_3_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_ldst_0 = io_wakeup_ports_3_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs1_0 = io_wakeup_ports_3_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs2_0 = io_wakeup_ports_3_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs3_0 = io_wakeup_ports_3_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_dst_rtype_0 = io_wakeup_ports_3_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype_0 = io_wakeup_ports_3_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype_0 = io_wakeup_ports_3_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_frs3_en_0 = io_wakeup_ports_3_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fcn_dw_0 = io_wakeup_ports_3_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_fcn_op_0 = io_wakeup_ports_3_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_val_0 = io_wakeup_ports_3_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_fp_rm_0 = io_wakeup_ports_3_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_typ_0 = io_wakeup_ports_3_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_3_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_debug_if_0 = io_wakeup_ports_3_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_3_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc_0 = io_wakeup_ports_3_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc_0 = io_wakeup_ports_3_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
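  // Wakeup port 4: pass-through copies of the incoming micro-op fields.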
wire io_wakeup_ports_4_valid_0 = io_wakeup_ports_4_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_4_bits_uop_inst_0 = io_wakeup_ports_4_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_4_bits_uop_debug_inst_0 = io_wakeup_ports_4_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_rvc_0 = io_wakeup_ports_4_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_4_bits_uop_debug_pc_0 = io_wakeup_ports_4_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_0_0 = io_wakeup_ports_4_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_1_0 = io_wakeup_ports_4_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_2_0 = io_wakeup_ports_4_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_3_0 = io_wakeup_ports_4_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_0_0 = io_wakeup_ports_4_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_1_0 = io_wakeup_ports_4_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_2_0 = io_wakeup_ports_4_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_3_0 = io_wakeup_ports_4_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_4_0 = io_wakeup_ports_4_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_5_0 = io_wakeup_ports_4_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_6_0 = io_wakeup_ports_4_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_7_0 = io_wakeup_ports_4_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_8_0 = io_wakeup_ports_4_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_9_0 = io_wakeup_ports_4_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_0 = io_wakeup_ports_4_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_4_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_4_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_dis_col_sel_0 = io_wakeup_ports_4_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_4_bits_uop_br_mask_0 = io_wakeup_ports_4_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_4_bits_uop_br_tag_0 = io_wakeup_ports_4_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_4_bits_uop_br_type_0 = io_wakeup_ports_4_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sfb_0 = io_wakeup_ports_4_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_fence_0 = io_wakeup_ports_4_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_fencei_0 = io_wakeup_ports_4_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sfence_0 = io_wakeup_ports_4_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_amo_0 = io_wakeup_ports_4_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_eret_0 = io_wakeup_ports_4_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_4_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_rocc_0 = io_wakeup_ports_4_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_mov_0 = io_wakeup_ports_4_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ftq_idx_0 = io_wakeup_ports_4_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_edge_inst_0 = io_wakeup_ports_4_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_pc_lob_0 = io_wakeup_ports_4_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_taken_0 = io_wakeup_ports_4_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_imm_rename_0 = io_wakeup_ports_4_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_imm_sel_0 = io_wakeup_ports_4_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_pimm_0 = io_wakeup_ports_4_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_4_bits_uop_imm_packed_0 = io_wakeup_ports_4_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_op1_sel_0 = io_wakeup_ports_4_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_op2_sel_0 = io_wakeup_ports_4_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_rob_idx_0 = io_wakeup_ports_4_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ldq_idx_0 = io_wakeup_ports_4_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_stq_idx_0 = io_wakeup_ports_4_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_rxq_idx_0 = io_wakeup_ports_4_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_pdst_0 = io_wakeup_ports_4_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs1_0 = io_wakeup_ports_4_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs2_0 = io_wakeup_ports_4_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs3_0 = io_wakeup_ports_4_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ppred_0 = io_wakeup_ports_4_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs1_busy_0 = io_wakeup_ports_4_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs2_busy_0 = io_wakeup_ports_4_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs3_busy_0 = io_wakeup_ports_4_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_ppred_busy_0 = io_wakeup_ports_4_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_stale_pdst_0 = io_wakeup_ports_4_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_exception_0 = io_wakeup_ports_4_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_4_bits_uop_exc_cause_0 = io_wakeup_ports_4_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_mem_cmd_0 = io_wakeup_ports_4_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_mem_size_0 = io_wakeup_ports_4_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_mem_signed_0 = io_wakeup_ports_4_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_uses_ldq_0 = io_wakeup_ports_4_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_uses_stq_0 = io_wakeup_ports_4_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_unique_0 = io_wakeup_ports_4_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_flush_on_commit_0 = io_wakeup_ports_4_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_csr_cmd_0 = io_wakeup_ports_4_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_4_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_ldst_0 = io_wakeup_ports_4_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs1_0 = io_wakeup_ports_4_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs2_0 = io_wakeup_ports_4_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs3_0 = io_wakeup_ports_4_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_dst_rtype_0 = io_wakeup_ports_4_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_lrs1_rtype_0 = io_wakeup_ports_4_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_lrs2_rtype_0 = io_wakeup_ports_4_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_frs3_en_0 = io_wakeup_ports_4_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fcn_dw_0 = io_wakeup_ports_4_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_fcn_op_0 = io_wakeup_ports_4_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_val_0 = io_wakeup_ports_4_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_fp_rm_0 = io_wakeup_ports_4_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_typ_0 = io_wakeup_ports_4_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_4_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_4_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_4_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_bp_debug_if_0 = io_wakeup_ports_4_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_4_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_debug_fsrc_0 = io_wakeup_ports_4_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_debug_tsrc_0 = io_wakeup_ports_4_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_child_rebusys_0 = io_child_rebusys; // @[issue-slot.scala:49:7]
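  // Signals tied to constants for this slot configuration (unused partial-issue flags, rebusy bits, fixed bypassable bits and speculative masks).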
wire io_wakeup_ports_1_bits_bypassable = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:49:7]
wire prs1_rebusys_1 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_2 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_3 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_4 = 1'h0; // @[issue-slot.scala:102:91]
wire prs2_rebusys_1 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_2 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_3 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_4 = 1'h0; // @[issue-slot.scala:103:91]
wire _next_uop_iw_p1_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p2_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p3_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _iss_ready_T_6 = 1'h0; // @[issue-slot.scala:136:131]
wire [1:0] io_iss_uop_lrs2_rtype = 2'h2; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_speculative_mask = 3'h0; // @[issue-slot.scala:49:7]
wire [2:0] _next_uop_iw_p1_speculative_child_T_1 = 3'h0; // @[Mux.scala:30:73]
wire [2:0] _next_uop_iw_p2_speculative_child_T_1 = 3'h0; // @[Mux.scala:30:73]
wire io_wakeup_ports_2_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire _iss_ready_T_7 = 1'h1; // @[issue-slot.scala:136:110]
wire [2:0] io_wakeup_ports_2_bits_speculative_mask = 3'h1; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_speculative_mask = 3'h2; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_speculative_mask = 3'h4; // @[issue-slot.scala:49:7]
wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:49:7]
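  // Forward-declared temporaries for the will_be_valid and request logic; driven later in the module.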
wire _io_will_be_valid_T_1; // @[issue-slot.scala:65:34]
wire _io_request_T_4; // @[issue-slot.scala:140:51]
wire [6:0] io_iss_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs2_0 = io_iss_uop_prs1_0; // @[issue-slot.scala:49:7]
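  // next_uop_*: fields of the slot's next-cycle micro-op (issue-slot.scala:59); driven later in the module.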
wire [31:0] next_uop_inst; // @[issue-slot.scala:59:28]
wire [31:0] next_uop_debug_inst; // @[issue-slot.scala:59:28]
wire next_uop_is_rvc; // @[issue-slot.scala:59:28]
wire [39:0] next_uop_debug_pc; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_0; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_1; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_2; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_0; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_1; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_2; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_4; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_5; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_6; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_7; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_8; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_9; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued_partial_agen; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued_partial_dgen; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_iw_p1_speculative_child; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_iw_p2_speculative_child; // @[issue-slot.scala:59:28]
wire next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_dis_col_sel; // @[issue-slot.scala:59:28]
wire [15:0] next_uop_br_mask; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_tag; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_type; // @[issue-slot.scala:59:28]
wire next_uop_is_sfb; // @[issue-slot.scala:59:28]
wire next_uop_is_fence; // @[issue-slot.scala:59:28]
wire next_uop_is_fencei; // @[issue-slot.scala:59:28]
wire next_uop_is_sfence; // @[issue-slot.scala:59:28]
wire next_uop_is_amo; // @[issue-slot.scala:59:28]
wire next_uop_is_eret; // @[issue-slot.scala:59:28]
wire next_uop_is_sys_pc2epc; // @[issue-slot.scala:59:28]
wire next_uop_is_rocc; // @[issue-slot.scala:59:28]
wire next_uop_is_mov; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ftq_idx; // @[issue-slot.scala:59:28]
wire next_uop_edge_inst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_pc_lob; // @[issue-slot.scala:59:28]
wire next_uop_taken; // @[issue-slot.scala:59:28]
wire next_uop_imm_rename; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_imm_sel; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_pimm; // @[issue-slot.scala:59:28]
wire [19:0] next_uop_imm_packed; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_op1_sel; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_op2_sel; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ldst; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wen; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren1; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren2; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren3; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap12; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap23; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fromint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_toint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fma; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_div; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wflags; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_vec; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_rob_idx; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ldq_idx; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_stq_idx; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_rxq_idx; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_pdst; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs1; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs2; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs3; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ppred; // @[issue-slot.scala:59:28]
wire next_uop_prs1_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs2_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs3_busy; // @[issue-slot.scala:59:28]
wire next_uop_ppred_busy; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_stale_pdst; // @[issue-slot.scala:59:28]
wire next_uop_exception; // @[issue-slot.scala:59:28]
wire [63:0] next_uop_exc_cause; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_mem_cmd; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_mem_size; // @[issue-slot.scala:59:28]
wire next_uop_mem_signed; // @[issue-slot.scala:59:28]
wire next_uop_uses_ldq; // @[issue-slot.scala:59:28]
wire next_uop_uses_stq; // @[issue-slot.scala:59:28]
wire next_uop_is_unique; // @[issue-slot.scala:59:28]
wire next_uop_flush_on_commit; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_csr_cmd; // @[issue-slot.scala:59:28]
wire next_uop_ldst_is_rs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_ldst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs2; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs3; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dst_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs1_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs2_rtype; // @[issue-slot.scala:59:28]
wire next_uop_frs3_en; // @[issue-slot.scala:59:28]
wire next_uop_fcn_dw; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_fcn_op; // @[issue-slot.scala:59:28]
wire next_uop_fp_val; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_fp_rm; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_typ; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_pf_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ae_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ma_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_debug_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_xcpt_if; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_fsrc; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_tsrc; // @[issue-slot.scala:59:28]
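  // io_iss_uop_*: micro-op fields driven onto the issue output port.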
wire io_iss_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_iss_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_partial_agen_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_partial_dgen_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [15:0] io_iss_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_iss_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_iss_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
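  // io_out_uop_*: micro-op fields driven onto the out_uop output port.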
wire io_out_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_agen_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_dgen_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_out_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
wire io_valid_0; // @[issue-slot.scala:49:7]
wire io_will_be_valid_0; // @[issue-slot.scala:49:7]
wire io_request_0; // @[issue-slot.scala:49:7]
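  // Slot state (issue-slot.scala:55-56): valid bit and stored micro-op registers; these feed the next_uop_out copies and, where connected, the io_iss_uop outputs.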
reg slot_valid; // @[issue-slot.scala:55:27]
assign io_valid_0 = slot_valid; // @[issue-slot.scala:49:7, :55:27]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_inst = slot_uop_inst; // @[util.scala:104:23]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_debug_inst = slot_uop_debug_inst; // @[util.scala:104:23]
reg slot_uop_is_rvc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rvc = slot_uop_is_rvc; // @[util.scala:104:23]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:49:7, :56:21]
wire [39:0] next_uop_out_debug_pc = slot_uop_debug_pc; // @[util.scala:104:23]
reg slot_uop_iq_type_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_0_0 = slot_uop_iq_type_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_0 = slot_uop_iq_type_0; // @[util.scala:104:23]
reg slot_uop_iq_type_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_1_0 = slot_uop_iq_type_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_1 = slot_uop_iq_type_1; // @[util.scala:104:23]
reg slot_uop_iq_type_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_2_0 = slot_uop_iq_type_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_2 = slot_uop_iq_type_2; // @[util.scala:104:23]
reg slot_uop_iq_type_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_3_0 = slot_uop_iq_type_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_3 = slot_uop_iq_type_3; // @[util.scala:104:23]
reg slot_uop_fu_code_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_0_0 = slot_uop_fu_code_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_0 = slot_uop_fu_code_0; // @[util.scala:104:23]
reg slot_uop_fu_code_1; // @[issue-slot.scala:56:21]
wire next_uop_out_fu_code_1 = slot_uop_fu_code_1; // @[util.scala:104:23]
reg slot_uop_fu_code_2; // @[issue-slot.scala:56:21]
wire next_uop_out_fu_code_2 = slot_uop_fu_code_2; // @[util.scala:104:23]
reg slot_uop_fu_code_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_3_0 = slot_uop_fu_code_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_3 = slot_uop_fu_code_3; // @[util.scala:104:23]
reg slot_uop_fu_code_4; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_4_0 = slot_uop_fu_code_4; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_4 = slot_uop_fu_code_4; // @[util.scala:104:23]
reg slot_uop_fu_code_5; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_5_0 = slot_uop_fu_code_5; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_5 = slot_uop_fu_code_5; // @[util.scala:104:23]
reg slot_uop_fu_code_6; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_6_0 = slot_uop_fu_code_6; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_6 = slot_uop_fu_code_6; // @[util.scala:104:23]
reg slot_uop_fu_code_7; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_7_0 = slot_uop_fu_code_7; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_7 = slot_uop_fu_code_7; // @[util.scala:104:23]
reg slot_uop_fu_code_8; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_8_0 = slot_uop_fu_code_8; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_8 = slot_uop_fu_code_8; // @[util.scala:104:23]
reg slot_uop_fu_code_9; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_9_0 = slot_uop_fu_code_9; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_9 = slot_uop_fu_code_9; // @[util.scala:104:23]
reg slot_uop_iw_issued; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_0 = slot_uop_iw_issued; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued = slot_uop_iw_issued; // @[util.scala:104:23]
reg slot_uop_iw_issued_partial_agen; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_partial_agen_0 = slot_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued_partial_agen = slot_uop_iw_issued_partial_agen; // @[util.scala:104:23]
reg slot_uop_iw_issued_partial_dgen; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_partial_dgen_0 = slot_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued_partial_dgen = slot_uop_iw_issued_partial_dgen; // @[util.scala:104:23]
reg [2:0] slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_speculative_child_0 = slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_iw_p1_speculative_child = slot_uop_iw_p1_speculative_child; // @[util.scala:104:23]
reg [2:0] slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_speculative_child_0 = slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_iw_p2_speculative_child = slot_uop_iw_p2_speculative_child; // @[util.scala:104:23]
reg slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:56:21]
wire next_uop_out_iw_p1_bypass_hint = slot_uop_iw_p1_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_bypass_hint_0 = slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p2_bypass_hint = slot_uop_iw_p2_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p3_bypass_hint_0 = slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p3_bypass_hint = slot_uop_iw_p3_bypass_hint; // @[util.scala:104:23]
reg [2:0] slot_uop_dis_col_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_dis_col_sel_0 = slot_uop_dis_col_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_dis_col_sel = slot_uop_dis_col_sel; // @[util.scala:104:23]
reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:49:7, :56:21]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_tag = slot_uop_br_tag; // @[util.scala:104:23]
reg [3:0] slot_uop_br_type; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_type_0 = slot_uop_br_type; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_type = slot_uop_br_type; // @[util.scala:104:23]
reg slot_uop_is_sfb; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfb = slot_uop_is_sfb; // @[util.scala:104:23]
reg slot_uop_is_fence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fence = slot_uop_is_fence; // @[util.scala:104:23]
reg slot_uop_is_fencei; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fencei = slot_uop_is_fencei; // @[util.scala:104:23]
reg slot_uop_is_sfence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfence_0 = slot_uop_is_sfence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfence = slot_uop_is_sfence; // @[util.scala:104:23]
reg slot_uop_is_amo; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_amo = slot_uop_is_amo; // @[util.scala:104:23]
reg slot_uop_is_eret; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_eret_0 = slot_uop_is_eret; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_eret = slot_uop_is_eret; // @[util.scala:104:23]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sys_pc2epc = slot_uop_is_sys_pc2epc; // @[util.scala:104:23]
reg slot_uop_is_rocc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rocc_0 = slot_uop_is_rocc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rocc = slot_uop_is_rocc; // @[util.scala:104:23]
reg slot_uop_is_mov; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_mov_0 = slot_uop_is_mov; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_mov = slot_uop_is_mov; // @[util.scala:104:23]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ftq_idx = slot_uop_ftq_idx; // @[util.scala:104:23]
reg slot_uop_edge_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_edge_inst = slot_uop_edge_inst; // @[util.scala:104:23]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:56:21]
assign io_iss_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_pc_lob = slot_uop_pc_lob; // @[util.scala:104:23]
reg slot_uop_taken; // @[issue-slot.scala:56:21]
assign io_iss_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_taken = slot_uop_taken; // @[util.scala:104:23]
reg slot_uop_imm_rename; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_rename_0 = slot_uop_imm_rename; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_imm_rename = slot_uop_imm_rename; // @[util.scala:104:23]
reg [2:0] slot_uop_imm_sel; // @[issue-slot.scala:56:21]
wire [2:0] next_uop_out_imm_sel = slot_uop_imm_sel; // @[util.scala:104:23]
reg [4:0] slot_uop_pimm; // @[issue-slot.scala:56:21]
assign io_iss_uop_pimm_0 = slot_uop_pimm; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_pimm = slot_uop_pimm; // @[util.scala:104:23]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:49:7, :56:21]
wire [19:0] next_uop_out_imm_packed = slot_uop_imm_packed; // @[util.scala:104:23]
reg [1:0] slot_uop_op1_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op1_sel_0 = slot_uop_op1_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_op1_sel = slot_uop_op1_sel; // @[util.scala:104:23]
reg [2:0] slot_uop_op2_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op2_sel_0 = slot_uop_op2_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_op2_sel = slot_uop_op2_sel; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ldst_0 = slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ldst = slot_uop_fp_ctrl_ldst; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wen; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wen_0 = slot_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wen = slot_uop_fp_ctrl_wen; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren1_0 = slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren1 = slot_uop_fp_ctrl_ren1; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren2_0 = slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren2 = slot_uop_fp_ctrl_ren2; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren3_0 = slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren3 = slot_uop_fp_ctrl_ren3; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap12_0 = slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap12 = slot_uop_fp_ctrl_swap12; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap23_0 = slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap23 = slot_uop_fp_ctrl_swap23; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagIn_0 = slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagIn = slot_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagOut_0 = slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagOut = slot_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fromint_0 = slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fromint = slot_uop_fp_ctrl_fromint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_toint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_toint_0 = slot_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_toint = slot_uop_fp_ctrl_toint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fastpipe_0 = slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fastpipe = slot_uop_fp_ctrl_fastpipe; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fma; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fma_0 = slot_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fma = slot_uop_fp_ctrl_fma; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_div; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_div_0 = slot_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_div = slot_uop_fp_ctrl_div; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_sqrt_0 = slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_sqrt = slot_uop_fp_ctrl_sqrt; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wflags_0 = slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wflags = slot_uop_fp_ctrl_wflags; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_vec; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_vec_0 = slot_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_vec = slot_uop_fp_ctrl_vec; // @[util.scala:104:23]
reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_rob_idx = slot_uop_rob_idx; // @[util.scala:104:23]
reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ldq_idx = slot_uop_ldq_idx; // @[util.scala:104:23]
reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_stq_idx = slot_uop_stq_idx; // @[util.scala:104:23]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_rxq_idx = slot_uop_rxq_idx; // @[util.scala:104:23]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_pdst = slot_uop_pdst; // @[util.scala:104:23]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:56:21]
wire [6:0] next_uop_out_prs1 = slot_uop_prs1; // @[util.scala:104:23]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:56:21]
wire [6:0] next_uop_out_prs2 = slot_uop_prs2; // @[util.scala:104:23]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs3 = slot_uop_prs3; // @[util.scala:104:23]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ppred = slot_uop_ppred; // @[util.scala:104:23]
reg slot_uop_prs1_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs1_busy = slot_uop_prs1_busy; // @[util.scala:104:23]
reg slot_uop_prs2_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs2_busy = slot_uop_prs2_busy; // @[util.scala:104:23]
reg slot_uop_prs3_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs3_busy = slot_uop_prs3_busy; // @[util.scala:104:23]
reg slot_uop_ppred_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ppred_busy = slot_uop_ppred_busy; // @[util.scala:104:23]
wire _iss_ready_T_3 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :136:88]
wire _agen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :137:95]
wire _dgen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :138:95]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_stale_pdst = slot_uop_stale_pdst; // @[util.scala:104:23]
reg slot_uop_exception; // @[issue-slot.scala:56:21]
assign io_iss_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_exception = slot_uop_exception; // @[util.scala:104:23]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:56:21]
assign io_iss_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:49:7, :56:21]
wire [63:0] next_uop_out_exc_cause = slot_uop_exc_cause; // @[util.scala:104:23]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_mem_cmd = slot_uop_mem_cmd; // @[util.scala:104:23]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_mem_size = slot_uop_mem_size; // @[util.scala:104:23]
reg slot_uop_mem_signed; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_mem_signed = slot_uop_mem_signed; // @[util.scala:104:23]
reg slot_uop_uses_ldq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_ldq = slot_uop_uses_ldq; // @[util.scala:104:23]
reg slot_uop_uses_stq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_stq = slot_uop_uses_stq; // @[util.scala:104:23]
reg slot_uop_is_unique; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_unique = slot_uop_is_unique; // @[util.scala:104:23]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:56:21]
assign io_iss_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_flush_on_commit = slot_uop_flush_on_commit; // @[util.scala:104:23]
reg [2:0] slot_uop_csr_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_csr_cmd_0 = slot_uop_csr_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_csr_cmd = slot_uop_csr_cmd; // @[util.scala:104:23]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ldst_is_rs1 = slot_uop_ldst_is_rs1; // @[util.scala:104:23]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_ldst = slot_uop_ldst; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs1 = slot_uop_lrs1; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs2 = slot_uop_lrs2; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs3 = slot_uop_lrs3; // @[util.scala:104:23]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dst_rtype = slot_uop_dst_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:56:21]
wire [1:0] next_uop_out_lrs1_rtype = slot_uop_lrs1_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:56:21]
wire [1:0] next_uop_out_lrs2_rtype = slot_uop_lrs2_rtype; // @[util.scala:104:23]
reg slot_uop_frs3_en; // @[issue-slot.scala:56:21]
assign io_iss_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_frs3_en = slot_uop_frs3_en; // @[util.scala:104:23]
reg slot_uop_fcn_dw; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_dw_0 = slot_uop_fcn_dw; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fcn_dw = slot_uop_fcn_dw; // @[util.scala:104:23]
reg [4:0] slot_uop_fcn_op; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_op_0 = slot_uop_fcn_op; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_fcn_op = slot_uop_fcn_op; // @[util.scala:104:23]
reg slot_uop_fp_val; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_val = slot_uop_fp_val; // @[util.scala:104:23]
reg [2:0] slot_uop_fp_rm; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_rm_0 = slot_uop_fp_rm; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_fp_rm = slot_uop_fp_rm; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_typ; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_typ_0 = slot_uop_fp_typ; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_typ = slot_uop_fp_typ; // @[util.scala:104:23]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_pf_if = slot_uop_xcpt_pf_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ae_if = slot_uop_xcpt_ae_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ma_if = slot_uop_xcpt_ma_if; // @[util.scala:104:23]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_debug_if = slot_uop_bp_debug_if; // @[util.scala:104:23]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_xcpt_if = slot_uop_bp_xcpt_if; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_fsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_fsrc = slot_uop_debug_fsrc; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_tsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_tsrc = slot_uop_debug_tsrc; // @[util.scala:104:23]
wire next_valid; // @[issue-slot.scala:58:28]
assign next_uop_inst = next_uop_out_inst; // @[util.scala:104:23]
assign next_uop_debug_inst = next_uop_out_debug_inst; // @[util.scala:104:23]
assign next_uop_is_rvc = next_uop_out_is_rvc; // @[util.scala:104:23]
assign next_uop_debug_pc = next_uop_out_debug_pc; // @[util.scala:104:23]
assign next_uop_iq_type_0 = next_uop_out_iq_type_0; // @[util.scala:104:23]
assign next_uop_iq_type_1 = next_uop_out_iq_type_1; // @[util.scala:104:23]
assign next_uop_iq_type_2 = next_uop_out_iq_type_2; // @[util.scala:104:23]
assign next_uop_iq_type_3 = next_uop_out_iq_type_3; // @[util.scala:104:23]
assign next_uop_fu_code_0 = next_uop_out_fu_code_0; // @[util.scala:104:23]
assign next_uop_fu_code_3 = next_uop_out_fu_code_3; // @[util.scala:104:23]
assign next_uop_fu_code_4 = next_uop_out_fu_code_4; // @[util.scala:104:23]
assign next_uop_fu_code_5 = next_uop_out_fu_code_5; // @[util.scala:104:23]
assign next_uop_fu_code_6 = next_uop_out_fu_code_6; // @[util.scala:104:23]
assign next_uop_fu_code_7 = next_uop_out_fu_code_7; // @[util.scala:104:23]
assign next_uop_fu_code_8 = next_uop_out_fu_code_8; // @[util.scala:104:23]
assign next_uop_fu_code_9 = next_uop_out_fu_code_9; // @[util.scala:104:23]
wire [15:0] _next_uop_out_br_mask_T_1; // @[util.scala:93:25]
assign next_uop_dis_col_sel = next_uop_out_dis_col_sel; // @[util.scala:104:23]
assign next_uop_br_mask = next_uop_out_br_mask; // @[util.scala:104:23]
assign next_uop_br_tag = next_uop_out_br_tag; // @[util.scala:104:23]
assign next_uop_br_type = next_uop_out_br_type; // @[util.scala:104:23]
assign next_uop_is_sfb = next_uop_out_is_sfb; // @[util.scala:104:23]
assign next_uop_is_fence = next_uop_out_is_fence; // @[util.scala:104:23]
assign next_uop_is_fencei = next_uop_out_is_fencei; // @[util.scala:104:23]
assign next_uop_is_sfence = next_uop_out_is_sfence; // @[util.scala:104:23]
assign next_uop_is_amo = next_uop_out_is_amo; // @[util.scala:104:23]
assign next_uop_is_eret = next_uop_out_is_eret; // @[util.scala:104:23]
assign next_uop_is_sys_pc2epc = next_uop_out_is_sys_pc2epc; // @[util.scala:104:23]
assign next_uop_is_rocc = next_uop_out_is_rocc; // @[util.scala:104:23]
assign next_uop_is_mov = next_uop_out_is_mov; // @[util.scala:104:23]
assign next_uop_ftq_idx = next_uop_out_ftq_idx; // @[util.scala:104:23]
assign next_uop_edge_inst = next_uop_out_edge_inst; // @[util.scala:104:23]
assign next_uop_pc_lob = next_uop_out_pc_lob; // @[util.scala:104:23]
assign next_uop_taken = next_uop_out_taken; // @[util.scala:104:23]
assign next_uop_imm_rename = next_uop_out_imm_rename; // @[util.scala:104:23]
assign next_uop_imm_sel = next_uop_out_imm_sel; // @[util.scala:104:23]
assign next_uop_pimm = next_uop_out_pimm; // @[util.scala:104:23]
assign next_uop_imm_packed = next_uop_out_imm_packed; // @[util.scala:104:23]
assign next_uop_op1_sel = next_uop_out_op1_sel; // @[util.scala:104:23]
assign next_uop_op2_sel = next_uop_out_op2_sel; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ldst = next_uop_out_fp_ctrl_ldst; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wen = next_uop_out_fp_ctrl_wen; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren1 = next_uop_out_fp_ctrl_ren1; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren2 = next_uop_out_fp_ctrl_ren2; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren3 = next_uop_out_fp_ctrl_ren3; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap12 = next_uop_out_fp_ctrl_swap12; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap23 = next_uop_out_fp_ctrl_swap23; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagIn = next_uop_out_fp_ctrl_typeTagIn; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagOut = next_uop_out_fp_ctrl_typeTagOut; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fromint = next_uop_out_fp_ctrl_fromint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_toint = next_uop_out_fp_ctrl_toint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fastpipe = next_uop_out_fp_ctrl_fastpipe; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fma = next_uop_out_fp_ctrl_fma; // @[util.scala:104:23]
assign next_uop_fp_ctrl_div = next_uop_out_fp_ctrl_div; // @[util.scala:104:23]
assign next_uop_fp_ctrl_sqrt = next_uop_out_fp_ctrl_sqrt; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wflags = next_uop_out_fp_ctrl_wflags; // @[util.scala:104:23]
assign next_uop_fp_ctrl_vec = next_uop_out_fp_ctrl_vec; // @[util.scala:104:23]
assign next_uop_rob_idx = next_uop_out_rob_idx; // @[util.scala:104:23]
assign next_uop_ldq_idx = next_uop_out_ldq_idx; // @[util.scala:104:23]
assign next_uop_stq_idx = next_uop_out_stq_idx; // @[util.scala:104:23]
assign next_uop_rxq_idx = next_uop_out_rxq_idx; // @[util.scala:104:23]
assign next_uop_pdst = next_uop_out_pdst; // @[util.scala:104:23]
assign next_uop_prs1 = next_uop_out_prs1; // @[util.scala:104:23]
assign next_uop_prs2 = next_uop_out_prs2; // @[util.scala:104:23]
assign next_uop_prs3 = next_uop_out_prs3; // @[util.scala:104:23]
assign next_uop_ppred = next_uop_out_ppred; // @[util.scala:104:23]
assign next_uop_ppred_busy = next_uop_out_ppred_busy; // @[util.scala:104:23]
assign next_uop_stale_pdst = next_uop_out_stale_pdst; // @[util.scala:104:23]
assign next_uop_exception = next_uop_out_exception; // @[util.scala:104:23]
assign next_uop_exc_cause = next_uop_out_exc_cause; // @[util.scala:104:23]
assign next_uop_mem_cmd = next_uop_out_mem_cmd; // @[util.scala:104:23]
assign next_uop_mem_size = next_uop_out_mem_size; // @[util.scala:104:23]
assign next_uop_mem_signed = next_uop_out_mem_signed; // @[util.scala:104:23]
assign next_uop_uses_ldq = next_uop_out_uses_ldq; // @[util.scala:104:23]
assign next_uop_uses_stq = next_uop_out_uses_stq; // @[util.scala:104:23]
assign next_uop_is_unique = next_uop_out_is_unique; // @[util.scala:104:23]
assign next_uop_flush_on_commit = next_uop_out_flush_on_commit; // @[util.scala:104:23]
assign next_uop_csr_cmd = next_uop_out_csr_cmd; // @[util.scala:104:23]
assign next_uop_ldst_is_rs1 = next_uop_out_ldst_is_rs1; // @[util.scala:104:23]
assign next_uop_ldst = next_uop_out_ldst; // @[util.scala:104:23]
assign next_uop_lrs1 = next_uop_out_lrs1; // @[util.scala:104:23]
assign next_uop_lrs2 = next_uop_out_lrs2; // @[util.scala:104:23]
assign next_uop_lrs3 = next_uop_out_lrs3; // @[util.scala:104:23]
assign next_uop_dst_rtype = next_uop_out_dst_rtype; // @[util.scala:104:23]
assign next_uop_lrs1_rtype = next_uop_out_lrs1_rtype; // @[util.scala:104:23]
assign next_uop_lrs2_rtype = next_uop_out_lrs2_rtype; // @[util.scala:104:23]
assign next_uop_frs3_en = next_uop_out_frs3_en; // @[util.scala:104:23]
assign next_uop_fcn_dw = next_uop_out_fcn_dw; // @[util.scala:104:23]
assign next_uop_fcn_op = next_uop_out_fcn_op; // @[util.scala:104:23]
assign next_uop_fp_val = next_uop_out_fp_val; // @[util.scala:104:23]
assign next_uop_fp_rm = next_uop_out_fp_rm; // @[util.scala:104:23]
assign next_uop_fp_typ = next_uop_out_fp_typ; // @[util.scala:104:23]
assign next_uop_xcpt_pf_if = next_uop_out_xcpt_pf_if; // @[util.scala:104:23]
assign next_uop_xcpt_ae_if = next_uop_out_xcpt_ae_if; // @[util.scala:104:23]
assign next_uop_xcpt_ma_if = next_uop_out_xcpt_ma_if; // @[util.scala:104:23]
assign next_uop_bp_debug_if = next_uop_out_bp_debug_if; // @[util.scala:104:23]
assign next_uop_bp_xcpt_if = next_uop_out_bp_xcpt_if; // @[util.scala:104:23]
assign next_uop_debug_fsrc = next_uop_out_debug_fsrc; // @[util.scala:104:23]
assign next_uop_debug_tsrc = next_uop_out_debug_tsrc; // @[util.scala:104:23]
wire [15:0] _next_uop_out_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27]
assign _next_uop_out_br_mask_T_1 = slot_uop_br_mask & _next_uop_out_br_mask_T; // @[util.scala:93:{25,27}]
assign next_uop_out_br_mask = _next_uop_out_br_mask_T_1; // @[util.scala:93:25, :104:23]
assign io_out_uop_inst_0 = next_uop_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_inst_0 = next_uop_debug_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rvc_0 = next_uop_is_rvc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_pc_0 = next_uop_debug_pc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_0_0 = next_uop_iq_type_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_1_0 = next_uop_iq_type_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_2_0 = next_uop_iq_type_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_3_0 = next_uop_iq_type_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_0_0 = next_uop_fu_code_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_1_0 = next_uop_fu_code_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_2_0 = next_uop_fu_code_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_3_0 = next_uop_fu_code_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_4_0 = next_uop_fu_code_4; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_5_0 = next_uop_fu_code_5; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_6_0 = next_uop_fu_code_6; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_7_0 = next_uop_fu_code_7; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_8_0 = next_uop_fu_code_8; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_9_0 = next_uop_fu_code_9; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_0 = next_uop_iw_issued; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_partial_agen_0 = next_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_partial_dgen_0 = next_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_speculative_child_0 = next_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_speculative_child_0 = next_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_bypass_hint_0 = next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_bypass_hint_0 = next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p3_bypass_hint_0 = next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dis_col_sel_0 = next_uop_dis_col_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_mask_0 = next_uop_br_mask; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_tag_0 = next_uop_br_tag; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_type_0 = next_uop_br_type; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfb_0 = next_uop_is_sfb; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fence_0 = next_uop_is_fence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fencei_0 = next_uop_is_fencei; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfence_0 = next_uop_is_sfence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_amo_0 = next_uop_is_amo; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_eret_0 = next_uop_is_eret; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sys_pc2epc_0 = next_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rocc_0 = next_uop_is_rocc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_mov_0 = next_uop_is_mov; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ftq_idx_0 = next_uop_ftq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_edge_inst_0 = next_uop_edge_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pc_lob_0 = next_uop_pc_lob; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_taken_0 = next_uop_taken; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_rename_0 = next_uop_imm_rename; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_sel_0 = next_uop_imm_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pimm_0 = next_uop_pimm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_packed_0 = next_uop_imm_packed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op1_sel_0 = next_uop_op1_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op2_sel_0 = next_uop_op2_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ldst_0 = next_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wen_0 = next_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren1_0 = next_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren2_0 = next_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren3_0 = next_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap12_0 = next_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap23_0 = next_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagIn_0 = next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagOut_0 = next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fromint_0 = next_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_toint_0 = next_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fastpipe_0 = next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fma_0 = next_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_div_0 = next_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_sqrt_0 = next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wflags_0 = next_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_vec_0 = next_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rob_idx_0 = next_uop_rob_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldq_idx_0 = next_uop_ldq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stq_idx_0 = next_uop_stq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rxq_idx_0 = next_uop_rxq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pdst_0 = next_uop_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_0 = next_uop_prs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_0 = next_uop_prs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_0 = next_uop_prs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_0 = next_uop_ppred; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_busy_0 = next_uop_prs1_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_busy_0 = next_uop_prs2_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_busy_0 = next_uop_prs3_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_busy_0 = next_uop_ppred_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stale_pdst_0 = next_uop_stale_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exception_0 = next_uop_exception; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exc_cause_0 = next_uop_exc_cause; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_cmd_0 = next_uop_mem_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_size_0 = next_uop_mem_size; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_signed_0 = next_uop_mem_signed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_ldq_0 = next_uop_uses_ldq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_stq_0 = next_uop_uses_stq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_unique_0 = next_uop_is_unique; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_flush_on_commit_0 = next_uop_flush_on_commit; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_csr_cmd_0 = next_uop_csr_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_is_rs1_0 = next_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_0 = next_uop_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_0 = next_uop_lrs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_0 = next_uop_lrs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs3_0 = next_uop_lrs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dst_rtype_0 = next_uop_dst_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_rtype_0 = next_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_rtype_0 = next_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_frs3_en_0 = next_uop_frs3_en; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_dw_0 = next_uop_fcn_dw; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_op_0 = next_uop_fcn_op; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_val_0 = next_uop_fp_val; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_rm_0 = next_uop_fp_rm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_typ_0 = next_uop_fp_typ; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_pf_if_0 = next_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ae_if_0 = next_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ma_if_0 = next_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_debug_if_0 = next_uop_bp_debug_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_xcpt_if_0 = next_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_fsrc_0 = next_uop_debug_fsrc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_tsrc_0 = next_uop_debug_tsrc; // @[issue-slot.scala:49:7, :59:28]
wire [15:0] _killed_T = io_brupdate_b1_mispredict_mask_0 & slot_uop_br_mask; // @[util.scala:126:51]
wire _killed_T_1 = |_killed_T; // @[util.scala:126:{51,59}]
wire killed = _killed_T_1 | io_kill_0; // @[util.scala:61:61, :126:59]
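  // Note (added, not part of the generated netlist): 'killed' asserts when the resolved
  // branch-mispredict mask overlaps this slot's branch mask, or when io_kill is raised; it
  // gates io_will_be_valid and the slot's next valid state computed below.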
wire _io_will_be_valid_T = ~killed; // @[util.scala:61:61]
assign _io_will_be_valid_T_1 = next_valid & _io_will_be_valid_T; // @[issue-slot.scala:58:28, :65:{34,37}]
assign io_will_be_valid_0 = _io_will_be_valid_T_1; // @[issue-slot.scala:49:7, :65:34]
wire _slot_valid_T = ~killed; // @[util.scala:61:61]
wire _slot_valid_T_1 = next_valid & _slot_valid_T; // @[issue-slot.scala:58:28, :74:{30,33}]
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
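      // Illustrative mapping (added comment; the value numTlTxns = 32 is assumed): read
      // transactions would use TL source IDs 0x00-0x1f (MSB = 0) and writes 0x20-0x3f
      // (MSB = 1), filling the 2 * numTlTxns source-ID range reserved per master above.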
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
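      // Worked example (added comment; usedWriteIds = b0101 with numTlTxns = 4 is assumed):
      // ~usedWriteIds = b1010, leftOR(...) = b1110, shifted left = b1100, inverted = b0011,
      // and ANDing with b1010 yields b0010 -- a one-hot pick of the lowest free write ID (ID 1).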
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
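      // Added note: 'writeEarlyAck' is true when this response's source ID has not yet been
      // marked used, i.e. the D-channel ack raced ahead of edgeOut.done(wOut); out.d.ready and
      // okB.valid below are held off until the write burst actually completes.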
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
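  // Same lowest-clear-bit selection as 'freeWriteIdOHRaw' above (added comment): e.g. with
  // numEntries = 4 and used = b0011, freeOH = b0100, so entry 2 is the next one reserved.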
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
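  // Added example: an 8-beat response arrives with numBeats1 = 7 and count stepping 0 through 7,
  // so the last beat is the one where count === numBeats1 === 7.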
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
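  // Illustrative timing (added comment, assuming a stored 4-beat burst): unwindBeats1 is loaded
  // with 3 when the burst starts, beatCounter then steps 0, 1, 2, 3 on successive SRAM reads,
  // and isUnwindBurstOver asserts on the final beat.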
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
module dataMems_423( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
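  // Added note: judging by the source locator above, this wraps one per-beat 'data' SRAM (the
  // 'dataMems' SyncReadMem inside ReservableListBuffer): 32 entries x 67 bits, with one read
  // port and one write port.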
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule